filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 value) | constargjson (string, 2-3.9k chars) | lang (string, 3 values) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 value)
---|---|---|---|---|---|---|---|---|---|---
rdioExport.py
|
# from __future__ import unicode_literals, absolute_import
import requests
from requests_oauthlib import OAuth1Session
import sys
import os
import os.path
import keyring
import difflib
git_sub_modules = './' #Relative paths ok too
for dir in os.listdir(git_sub_modules):
path = os.path.join(git_sub_modules, dir)
if not path in sys.path:
sys.path.append(path)
from pyItunes import *
rdioURL = "http://api.rdio.com/1/"
request_token_url = "http://api.rdio.com/oauth/request_token"
base_authorization_url = "https://www.rdio.com/oauth/authorize"
access_token_url = "http://api.rdio.com/oauth/access_token"
client_key = "jbu8brgbmq63qazzvttsnv5g"
client_secret = "U2HTvUraQ8"
class CommonEqualityMixin(object):
def __hash__(self):
if self.__dict__ is not None:
# a dict is unhashable, so hash the tuple of attribute names instead
return hash(tuple(sorted(self.__dict__)))
else:
return 0
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.__unicode__()
class Song(CommonEqualityMixin):
def __init__(self, title, album="", artist="", playCount=0):
self.title = title
self.album = album
self.artist = artist
self.playCount = playCount
def __unicode__(self):
return "{0.title},{0.artist},{0.album}".format(self)
class Album(CommonEqualityMixin):
def __init__(self, title, artist="", tracks=None):
self.title = title
self.artist = artist
self.tracks = tracks
self.playCount = 0
if self.tracks is not None:
self.playCount = sum([track.playCount for track in self.tracks])/len(self.tracks)
def plays(self):
if self.tracks is not None and self.playCount == 0:
self.playCount = sum([track.playCount for track in self.tracks])/len(self.tracks)
return self.playCount
def __eq__(self, other):
if type(other) is type(self):
if self.title is None or other.title is None or self.artist is None or other.artist is None:
# print self
# print other
return self.title == other.title and self.artist == other.artist
else:
titleRatio = difflib.SequenceMatcher(None, self.title.lower(), other.title.lower()).ratio()
artistRatio = difflib.SequenceMatcher(None, self.artist.lower(), other.artist.lower()).ratio()
# print "{0} {1}".format(titleRatio, artistRatio)
close = titleRatio > 0.75 and artistRatio > 0.75
# if close and (titleRatio < 1 or artistRatio < 1):
# print("{0}/{1}:{2}\n\t{3}/{4}:{5}".format(self.title, other.title, titleRatio, self.artist, other.artist, artistRatio))
return close
else:
return False
def __hash__(self):
if self.__dict__ is not None:
dic = self.__dict__
tup = tuple(dic)
has = hash(tup)
return has
else:
return 0
def __unicode__(self):
self.plays()
return "{0.title}: \t {0.artist} ({0.playCount})".format(self)
class Artist(CommonEqualityMixin):
def __init__(self, name, albums=None):
self.name = name
self.albums = albums
def __unicode__(self):
return self.name
def songListToAlbumList(songs):
albums = {}
for song in songs:
if song.album is not None and song.artist is not None:
if song.album.lower() in albums:
albums[song.album.lower()].tracks.append(song)
else:
newAlbum = Album(song.album, song.artist, [song])
albums[song.album.lower()] = newAlbum
return list(albums.values())
def authenticate():
oauth = OAuth1Session(client_key, client_secret=client_secret, callback_uri="oob")
fetch_response = oauth.fetch_request_token(request_token_url)
resource_owner_key = fetch_response.get('oauth_token')
resource_owner_secret = fetch_response.get('oauth_token_secret')
authorization_url = oauth.authorization_url(base_authorization_url)
print('Please go here and authorize,', authorization_url)
verifier = input('Paste the PIN here: ')
rdio = OAuth1Session(client_key,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
oauth_tokens = rdio.fetch_access_token(access_token_url)
keyring.set_password("rdioExporty", "token", oauth_tokens['oauth_token'])
keyring.set_password("rdioExporty", "secret", oauth_tokens['oauth_token_secret'])
return rdio
def reAuthenticate():
rdio = None
token = keyring.get_password("rdioExporty", "token")
secret = keyring.get_password("rdioExporty", "secret")
if token is not None and secret is not None:
rdio = OAuth1Session(client_key,
client_secret=client_secret)
rdio._populate_attributes({'oauth_token': token,
'oauth_token_secret': secret})
else:
rdio = authenticate()
return rdio
rdio = reAuthenticate()
# Get user ID
userIDAns = rdio.post(rdioURL, {'method': "currentUser"})
userJson = None
try:
userJson = userIDAns.json()
except ValueError:
print(userIDAns)
print(userIDAns.text)
if userJson is None or userJson['status'] != 'ok':
print(userIDAns)
sys.exit(1)
userID = userJson['result']['key']
# Get rdio songs
songs = rdio.post(rdioURL, {'method': 'getTracksInCollection', 'user': userID, 'sort': 'playCount', 'extras': 'playCount'}).json()['result']
print("You have {0} tracks in your rdio library.".format(len(songs)))
songlist = []
for song in songs:
newSong = Song(song['name'], album=song['album'], artist=song['artist'])
if 'playCount' in song:
newSong.playCount = song['playCount']
songlist.append(newSong)
albumlist = set(songListToAlbumList(songlist))
# Read iTunes Library
homeDir = os.getenv("HOME")
iTunesDir = os.path.join(homeDir, "Music", "iTunes", "iTunes Music Library.xml")
iLibrary = Library(iTunesDir)
iSonglist = [Song(song[1].name, album=song[1].album, artist=song[1].artist) for song in list(iLibrary.songs.items())]
iAlbumlist = set(songListToAlbumList(iSonglist))
print("You have {0} tracks in your iTunes library.".format(len(iSonglist)))
rdioOnly = [x for x in songlist if x not in iSonglist]
itunesOnly = [x for x in iSonglist if x not in songlist]
print("Only in rdio, {0} tracks.".format(len(rdioOnly)))
print("Only in iTunes {0} tracks.".format(len(itunesOnly)))
print("In both, {0} tracks.".format(len(songlist) - len(rdioOnly)))
listyiAlbums = list(iAlbumlist)
overlap = [album for album in albumlist if album in listyiAlbums]
print("rdio albums: {0}, iTunes albums {1}, overlap {2}".format(len(albumlist), len(iAlbumlist), len(overlap)))
toBuy = [album for album in albumlist if album not in listyiAlbums]
toBuy.sort(key = lambda x: x.plays())
print("\n\n")
for album in toBuy:
print(album)
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
core/src/main/java/com/seleniumtests/browserfactory/mobile/MobileDeviceSelector.java
|
/**
* Original work: Copyright 2015 www.seleniumtests.com
* Modified work: Copyright 2016 www.infotel.com
* Copyright 2017-2019 B.Hecquet
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.seleniumtests.browserfactory.mobile;
import java.util.ArrayList;
import java.util.List;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.remote.CapabilityType;
import com.seleniumtests.customexception.ConfigurationException;
import com.seleniumtests.driver.BrowserType;
import com.seleniumtests.driver.DriverMode;
import io.appium.java_client.remote.AndroidMobileCapabilityType;
import io.appium.java_client.remote.IOSMobileCapabilityType;
import io.appium.java_client.remote.MobileCapabilityType;
public class MobileDeviceSelector {
private AdbWrapper adbWrapper;
private InstrumentsWrapper instrumentsWrapper;
private Boolean androidReady;
private Boolean iosReady;
public MobileDeviceSelector initialize() {
try {
adbWrapper = new AdbWrapper();
androidReady = true;
} catch (ConfigurationException e) {
adbWrapper = null;
androidReady = false;
}
try {
instrumentsWrapper = new InstrumentsWrapper();
iosReady = true;
} catch (ConfigurationException e) {
instrumentsWrapper = null;
iosReady = false;
}
return this;
}
private void isInitialized() {
if (androidReady == null || iosReady == null) {
throw new ConfigurationException("You must call MobileDeviceSelector.initialize() before using it");
}
}
private List<MobileDevice> filterDevices(List<MobileDevice> deviceList, String deviceName, String platformName, String platformVersion) {
List<MobileDevice> filteredDeviceList = new ArrayList<>();
for (MobileDevice device: deviceList) {
boolean keep = true;
if (deviceName != null && device.getName() != null && !device.getName().equalsIgnoreCase(deviceName)) {
keep = false;
}
if (platformName != null && device.getPlatform() != null && !device.getPlatform().equalsIgnoreCase(platformName)) {
keep = false;
}
if (platformVersion != null && device.getVersion() != null && !device.getVersion().equalsIgnoreCase(platformVersion)) {
keep = false;
}
if (keep) {
filteredDeviceList.add(device);
}
}
return filteredDeviceList;
}
/**
* Returns the mobile device corresponding to the device name and/or OS version specified in test properties
* @throws ConfigurationException if no relevant device is found
* @return the first matching mobile device
*/
public MobileDevice getRelevantMobileDevice(MutableCapabilities capabilities) {
isInitialized();
Object deviceName = capabilities.getCapability(MobileCapabilityType.DEVICE_NAME);
Object platformName = capabilities.getCapability(CapabilityType.PLATFORM_NAME);
Object platformVersion = capabilities.getCapability(MobileCapabilityType.PLATFORM_VERSION);
if (deviceName == null
&& platformName == null
&& platformVersion == null
) {
throw new ConfigurationException("at least one mobile capaiblity must be provided: DEVICE_NAME, PLATFORM_NAME, PLATFORM_VERSION");
}
List<MobileDevice> deviceList = new ArrayList<>();
if (Boolean.TRUE.equals(androidReady)) {
deviceList.addAll(adbWrapper.getDeviceList());
}
if (Boolean.TRUE.equals(iosReady)) {
deviceList.addAll(instrumentsWrapper.parseIosDevices());
}
if (deviceList.isEmpty()) {
throw new ConfigurationException("No device found, check at least one is connected");
}
List<MobileDevice> filteredDeviceList = filterDevices(deviceList,
deviceName == null ? null: deviceName.toString(),
platformName == null ? null: platformName.toString(),
platformVersion == null ? null: platformVersion.toString()
);
if (filteredDeviceList.isEmpty()) {
throw new ConfigurationException("no matching device found among: " + deviceList);
}
// returns the first matching device
return filteredDeviceList.get(0);
}
/**
* From the input capabilities (e.g. platform, version or device real name), update capabilities
* with deviceName, platform, version, or other useful data
* @param capabilities the capabilities to update
* @param driverMode the driver execution mode
* @return the updated capabilities
*/
public MutableCapabilities updateCapabilitiesWithSelectedDevice(MutableCapabilities capabilities, DriverMode driverMode) {
MobileDevice selectedDevice = getRelevantMobileDevice(capabilities);
if ("android".equals(selectedDevice.getPlatform())) {
capabilities.setCapability(MobileCapabilityType.DEVICE_NAME, selectedDevice.getId());
// set the right chromedriver executable according to the android browser / chrome version
// it's only the file name, not its path
if (driverMode == DriverMode.LOCAL && !capabilities.getBrowserName().isEmpty()) {
String chromeDriverFile = null;
if (BrowserType.CHROME.toString().equalsIgnoreCase(capabilities.getBrowserName())) {
chromeDriverFile = selectedDevice.getBrowserInfo(BrowserType.CHROME).getDriverFileName();
} else if (BrowserType.BROWSER.toString().equalsIgnoreCase(capabilities.getBrowserName())) {
chromeDriverFile = selectedDevice.getBrowserInfo(BrowserType.BROWSER).getDriverFileName();
}
if (chromeDriverFile != null) {
// driver extraction will be done later. For example in AppiumDriverFactory
capabilities.setCapability(AndroidMobileCapabilityType.CHROMEDRIVER_EXECUTABLE, chromeDriverFile);
}
}
} else if ("ios".equalsIgnoreCase(selectedDevice.getPlatform())) {
capabilities.setCapability(MobileCapabilityType.UDID, selectedDevice.getId());
capabilities.setCapability(MobileCapabilityType.DEVICE_NAME, selectedDevice.getName());
capabilities.setCapability(IOSMobileCapabilityType.XCODE_CONFIG_FILE, System.getenv("APPIUM_HOME") + "/node_modules/appium/node_modules/appium-xcuitest-driver/WebDriverAgent/xcodeConfigFile.xcconfig");
}
capabilities.setCapability(CapabilityType.PLATFORM_NAME, selectedDevice.getPlatform());
capabilities.setCapability(MobileCapabilityType.PLATFORM_VERSION, selectedDevice.getVersion());
return capabilities;
}
public boolean isAndroidReady() {
return androidReady;
}
public void setAndroidReady(boolean androidReady) {
this.androidReady = androidReady;
}
public boolean isIosReady() {
return iosReady;
}
public void setIosReady(boolean iosReady) {
this.iosReady = iosReady;
}
}
| ["\"APPIUM_HOME\""] | [] | ["APPIUM_HOME"] | [] | ["APPIUM_HOME"] | java | 1 | 0 | |
qa/rpc-tests/p2p-acceptblock.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("DESIRED", "desired"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| [] | [] | ["DESIRED"] | [] | ["DESIRED"] | python | 1 | 0 | |
app/main.py
|
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
# heroku testing
# heroku test sucks
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print("start part")
print("================\n")
# print(json.dumps(data))
color = "#ff0000"
headType = "silly"
tailType = "freckled"
return start_response(color, headType, tailType)
def init(data):
# print("init")
# print("=================\n")
datastring = json.dumps(data)
datastore = json.loads(datastring)
# print(datastore)
print("Turn: " + str(datastore['turn']))
myhead = list(datastore['you']['body'][0].values())
mybody = []
mylength = len(datastore['you']['body'])
myhealth = datastore['you']['health']
for coords in datastore['you']['body']:
mybody.append(list(coords.values()))
snakexy = []
snakehead = []
snakeid = []
snakelength = []
for snake in datastore['board']['snakes']:
# onesnakexy = [] #one snake's body
snakeid.append(snake['id'])
snakelength.append(len(snake['body']))
snakehead.append(list(snake['body'][
0].values())) # append all snakes head coordinates to an array of snake heads (eachcoordsofhead array in allsnakearray) (2dArray)
for coords in snake['body']:
if list(coords.values()) not in snakexy:
snakexy.append(list(coords.values()))
height = datastore["board"]["height"]
width = datastore["board"]["width"]
wall = [] # 2d array of coordinates
for i in range(0, height):
wall.append([-1, i])
for i in range(0, height):
wall.append([width - 1, i])
for i in range(1, width - 1):
wall.append([i, 0])
for i in range(1, width - 1):
wall.append([i, height - 1])
food_x = []
food_y = []
for i in range(0, len(datastore["board"]["food"])):
food_x.append(int(datastore["board"]["food"][i]["x"]))
food_y.append(int(datastore["board"]["food"][i]["y"]))
# to get my position
my_position_x = []
my_position_y = []
for i in range(0, len(datastore["you"]["body"])):
my_position_x.append(int(datastore["you"]["body"][i]["x"]))
my_position_y.append(int(datastore["you"]["body"][i]["y"]))
return wall, myhead, mybody, mylength, myhealth, snakehead, snakexy, snakeid, snakelength, height, width, food_x, food_y, my_position_x, my_position_y
# snakexy now does not include tails that disappear in the next iteration
def dist_calc(target, test1, test2): # test1 must be zero, test 2 must be body width or height
# if the minimum is in zero, return True, if the minimum is in width or height, return False
test1_output = [abs(target - x) for x in test1]
test2_output = [abs(target - x) for x in test2]
print("test1_output\n" + "===========\n" + str(test1_output) + "\n")
print("test2_output\n" + "===========\n" + str(test2_output) + "\n")
if min(test1_output) < min(test2_output):
print("dist_calc returns True\n")
return True
else:
print("dist_calc returns False\n")
return False
@bottle.post('/move')
def move():
data = bottle.request.json
"""
TODO: Using the data from the endpoint request object, your
snake AI must choose a direction to move in.
"""
# print("move part================\n")
wall, myhead, mybody, mylength, myhealth, snakehead, snakexy, snakeid, snakelength, height, width, food_x, food_y, my_position_x, my_position_y = init(
data)
safe = []
# avoid all obstacles
right = [myhead[0] + 1, myhead[1]]
left = [myhead[0] - 1, myhead[1]]
down = [myhead[0], myhead[1] + 1]
up = [myhead[0], myhead[1] - 1]
snakexyexcepttailplusheadposiblemoves = list(snakexy) # copy so the original body list is not mutated below
snakeheadexceptmine = list(snakehead) # copy so snakehead keeps its index alignment with snakelength
snakeheadexceptmine.remove(myhead)
killpotential = []
j = 0
for onesnakehead in snakehead:
headright = [onesnakehead[0] + 1, onesnakehead[1]]
headleft = [onesnakehead[0] - 1, onesnakehead[1]]
headdown = [onesnakehead[0], onesnakehead[1] + 1]
headup = [onesnakehead[0], onesnakehead[1] - 1]
if onesnakehead == myhead: # if head is my own
j += 1
elif snakelength[j] < mylength: # if mylength is longer, dont add it as a threat, add it as a kill potential
killpotential.append(headright)
killpotential.append(headleft)
killpotential.append(headdown)
killpotential.append(headup)
j += 1
else:
if headright not in snakexyexcepttailplusheadposiblemoves:
snakexyexcepttailplusheadposiblemoves.append(headright)
if headleft not in snakexyexcepttailplusheadposiblemoves:
snakexyexcepttailplusheadposiblemoves.append(headleft)
if headup not in snakexyexcepttailplusheadposiblemoves:
snakexyexcepttailplusheadposiblemoves.append(headup)
if headdown not in snakexyexcepttailplusheadposiblemoves:
snakexyexcepttailplusheadposiblemoves.append(headdown)
safezone = []
dirkillpotential = []
if killpotential: # if there is kill potential zone, append direction to zone that kills
if right in killpotential:
dirkillpotential.append("right")
if left in killpotential:
dirkillpotential.append("left")
if down in killpotential:
dirkillpotential.append("down")
if up in killpotential:
dirkillpotential.append("up")
if right not in snakexyexcepttailplusheadposiblemoves and right[0] != width: # right direction (width is the x bound)
# right is safe
safezone.append(right)
safe.append("right")
if left not in snakexyexcepttailplusheadposiblemoves and left[0] != -1:
safezone.append(left)
safe.append("left")
if down not in snakexyexcepttailplusheadposiblemoves and down[1] != height:
safezone.append(down)
safe.append("down")
if up not in snakexyexcepttailplusheadposiblemoves and up[1] != -1:
safezone.append(up)
safe.append("up")
if not safe:
# no fully safe zone: risk the blocks next to a longer snake's head (still avoiding bodies and walls) and pray they don't go there
safe = [d for d, p in (("right", right), ("left", left), ("down", down), ("up", up)) if p not in snakexy and 0 <= p[0] < width and 0 <= p[1] < height] or ["right", "left", "down", "up"]
print("safe\n" + "===========\n" + str(safe) + "\n")
# print("moveresponse\n" + "==========\n" + str(direction) + "\n")
# return move_response(dirsafekill)
# DEADEND
# 1. Check every point starting from one corner and moving to the other, in either rows or columns, it doesn't matter. Once you reach a point that has three or more orthogonally adjacent walls, mark that point as a dead end, and go to 2.
# 2. Find the direction of the empty space next to this point (if any), and check every point in that direction. For each of those points: if it has two or more adjacent walls, mark it as a dead end. If it has only one wall, go to 3. If it has no walls, stop checking in this direction and continue with number 1.
# 3. In every direction that does not have a wall, repeat number 2.
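# Below is a minimal, hypothetical sketch of step 1 above (assumed helper, not called by the
# logic that follows): count how many of a point's four orthogonal neighbours are blocked by a
# snake body or the board border; a count of 3 or more would mark the point as a dead end.
def count_blocked_neighbours(point, blocked, board_width, board_height):
x, y = point
neighbours = [[x + 1, y], [x - 1, y], [x, y + 1], [x, y - 1]]
return sum(1 for n in neighbours if n in blocked or n[0] < 0 or n[0] >= board_width or n[1] < 0 or n[1] >= board_height)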
safer = []
mybody_x = []
mybody_y = []
for i in range(0, len(mybody)):
mybody_x.append(mybody[i][0])
for j in range(0, len(mybody)):
mybody_y.append(mybody[j][1])
# check the lower risk dead end direction
if len(safe) == 3:
# 1st case 3 ways to go
# direction is down which have ["down", "right", "left"] choice
if "up" not in safe:
# check right and left (x do not contain any body part)
if left[0] in mybody_x and right[0] in mybody_x:
wall_body_zero = []
wall_body_width = []
body_head_y = mybody_y[0]
for num, i in enumerate(mybody_x):
if mybody_x[num] == min(mybody_x):
wall_body_zero.append(mybody_y[num])
if mybody_x[num] == max(mybody_x):
wall_body_width.append(mybody_y[num])
safer.append("down")
to_go = dist_calc(body_head_y, wall_body_zero, wall_body_width)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("right")
else:
safer.append("left")
else:
safer = safe
# direction is up which have ["up", "right", "left"] choice
elif "down" not in safe:
# check right and left (x do not contain any body part)
if left[0] in mybody_x and right[0] in mybody_x:
wall_body_zero = []
wall_body_width = []
body_head_y = mybody_y[0]
for num, i in enumerate(mybody_x):
if mybody_x[num] == min(mybody_x):
wall_body_zero.append(mybody_y[num])
if mybody_x[num] == max(mybody_x):
wall_body_width.append(mybody_y[num])
safer.append("up")
to_go = dist_calc(body_head_y, wall_body_zero, wall_body_width)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("right")
else:
safer.append("left")
else:
safer = safe
# direction is left which have ["up", "down", "left"] choice
elif "right" not in safe:
if down[1] in mybody_y and up[1] in mybody_y:
wall_body_zero = []
wall_body_height = []
body_head_x = mybody_x[0]
for num, i in enumerate(mybody_y):
if mybody_y[num] == min(mybody_y):
wall_body_zero.append(mybody_x[num])
if mybody_y[num] == max(mybody_y):
wall_body_height.append(mybody_x[num])
safer.append("left")
to_go = dist_calc(body_head_x, wall_body_zero, wall_body_height)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("down")
else:
safer.append("up")
else:
safer = safe
# direction is right which have ["up", "down", "right"] choice
else:
if up[1] in mybody_y and down[1] not in mybody_y:
safer.append("down")
safer.append("right")
elif down[1] in mybody_y and up[1] not in mybody_y:
safer.append("up")
safer.append("right")
elif down[1] in mybody_y and up[1] in mybody_y:
wall_body_zero = []
wall_body_height = []
body_head_x = mybody_x[0]
for num, i in enumerate(mybody_y):
if mybody_y[num] == min(mybody_y):
wall_body_zero.append(mybody_x[num])
if mybody_y[num] == max(mybody_y):
wall_body_height.append(mybody_x[num])
safer.append("right")
to_go = dist_calc(body_head_x, wall_body_zero, wall_body_height)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("down")
else:
safer.append("up")
else:
safer = safe
elif len(safe) == 2:
# 2nd case 2 ways to go when there is a wall or other snakes
# only consider ["up", "down"] or ["right", "left"] (when go into the wall)
# ["up", "down"] case
if "right" not in safe and "left" not in safe:
if up[1] in mybody_y and down[1] not in mybody_y:
# direction = "down"
safer.append("down")
elif down[1] in mybody_y and up[1] not in mybody_y:
# direction = "up"
safer.append("up")
elif up[1] in mybody_y and down[1] in mybody_y:
wall_body_zero = []
wall_body_height = []
body_head_x = mybody_x[0]
for num, i in enumerate(mybody_y):
if mybody_y[num] == min(mybody_y):
wall_body_zero.append(mybody_x[num])
if mybody_y[num] == max(mybody_y):
wall_body_height.append(mybody_x[num])
to_go = dist_calc(body_head_x, wall_body_zero, wall_body_height)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("down")
else:
safer.append("up")
else:
safer = safe
elif "up" not in safe and "down" not in safe:
print("check right/left case")
if right[0] in mybody_x and left[0] not in mybody_x:
print("check right done")
safer.append("left")
elif left[0] in mybody_x and right[0] not in mybody_x:
print("check left done")
safer.append("right")
elif left[0] in mybody_x and right[0] in mybody_x:
# if 0 in mybody_x:
# # direction = "right"
# safer.append("right")
# elif width-1 in mybody_x:
# # direction = "left"
# safer.append("left")
# else:
# check if both body are close to the wall,
# choose the direction with further body part touching the wall
wall_body_zero = []
wall_body_width = []
body_head_y = mybody_y[0]
for num, i in enumerate(mybody_x):
if mybody_x[num] == min(mybody_x):
wall_body_zero.append(mybody_y[num])
if mybody_x[num] == max(mybody_x):
wall_body_width.append(mybody_y[num])
to_go = dist_calc(body_head_y, wall_body_zero, wall_body_width)
# if the minimum is in zero, to_go is True, if the minimum is in width or height, to_go is False
if to_go == True:
safer.append("right")
else:
safer.append("left")
else:
safer = safe
else:
safer = safe
else:
safer = safe
# kill the weak snake
# print("safer")
# print(safer)
# print("direction")
# print(direction)
print("safer\n" + "===========\n" + str(safer) + "\n")
print("dirkillpotential\n" + "===========\n" + str(dirkillpotential) + "\n")
dirkillpotentialandsafer = [value for value in dirkillpotential if value in safer]
print("dirkillpotentialandsafer\n" + "===========\n" + str(dirkillpotentialandsafer) + "\n")
if myhealth > 40 and dirkillpotentialandsafer:
direction = random.choice(dirkillpotentialandsafer)
print("direction\n" + "===========\n" + str(direction) + "\n")
return move_response(direction)
# CHECKINGFOODWITHSAFER
if myhealth < 20:
# the 4 direction we can go
left_x = my_position_x[0] - 1
right_x = my_position_x[0] + 1
down_y = my_position_y[0] + 1
up_y = my_position_y[0] - 1
# now let's see who is the closest snake to us
min_dist_dict = {}
# Check for right
if "right" in safer:
distance_min = 9999999999
for i in range(0, len(food_x)):
x = food_x[i] - right_x
y = food_y[i] - my_position_y[0]
distance = x ** 2 + y ** 2
if distance_min > distance:
distance_min = distance
min_dist_dict["right"] = distance_min
if "left" in safer:
distance_min = 9999999999
for i in range(0, len(food_x)):
x = food_x[i] - left_x
y = food_y[i] - my_position_y[0]
distance = x ** 2 + y ** 2
if distance_min > distance:
distance_min = distance
min_dist_dict["left"] = distance_min
if "down" in safer:
distance_min = 9999999999
for i in range(0, len(food_x)):
x = food_x[i] - my_position_x[0]
y = food_y[i] - down_y
distance = x ** 2 + y ** 2
if distance_min > distance:
distance_min = distance
min_dist_dict["down"] = distance_min
if "up" in safer:
distance_min = 9999999999
for i in range(0, len(food_x)):
x = food_x[i] - my_position_x[0]
y = food_y[i] - up_y
distance = x ** 2 + y ** 2
if distance_min > distance:
distance_min = distance
min_dist_dict["up"] = distance_min
# dir = 0
# for i in range(0 , 3):
# if distance_min[i] == min(distance_min):
# dir = i
direction = min(min_dist_dict, key=min_dist_dict.get)
print("direction\n" + "===========\n" + str(direction) + "\n")
return move_response(direction)
direction = random.choice(safer)
return move_response(direction)
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print("=========")
print("end")
# print(json.dumps(data))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| [] | [] | ["PORT", "IP", "DEBUG"] | [] | ["PORT", "IP", "DEBUG"] | python | 3 | 0 | |
samples/snippets/import_data_video_classification_sample_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import import_data_video_classification_sample
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
GCS_SOURCE = "gs://ucaip-sample-resources/video_classification_train.jsonl"
METADATA_SCHEMA_URI = (
"gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml"
)
@pytest.fixture(scope="function", autouse=True)
def setup(create_dataset):
create_dataset(PROJECT_ID, LOCATION, METADATA_SCHEMA_URI)
yield
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_dataset):
yield
@pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420")
def test_ucaip_generated_import_data_video_classification_sample_single_label_image(
capsys, shared_state
):
dataset_id = shared_state["dataset_name"].split("/")[-1]
import_data_video_classification_sample.import_data_video_classification_sample(
project=PROJECT_ID, dataset_id=dataset_id, gcs_source_uri=GCS_SOURCE,
)
out, _ = capsys.readouterr()
assert "import_data_response" in out
| [] | [] | ["BUILD_SPECIFIC_GCLOUD_PROJECT"] | [] | ["BUILD_SPECIFIC_GCLOUD_PROJECT"] | python | 1 | 0 | |
src/test/java/synapticloop/linode/LinodeApiAccountTest.java
|
package synapticloop.linode;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import synapticloop.linode.api.response.AccountInfoResponse;
import synapticloop.linode.api.response.AvailLinodePlansResponse;
import synapticloop.linode.api.response.bean.LinodePlan;
import synapticloop.linode.exception.ApiException;
public class LinodeApiAccountTest {
private LinodeApi linodeApi;
@Before
public void setup() {
linodeApi = new LinodeApi(System.getenv("LINODE_API_KEY"));
}
@Test
public void testEstimateInvoiceName() throws ApiException {
AvailLinodePlansResponse availLinodePlans = linodeApi.getAvailLinodePlans();
LinodePlan linodePlan = availLinodePlans.getLinodePlans().get(0);
try {
linodeApi.getAccountEstimateInvoice("linode_new");
} catch(ApiException ex) {
}
try {
linodeApi.getAccountEstimateInvoice("linode_new", 1l, linodePlan.getPlanId(), null);
} catch(ApiException ex) {
assertTrue(true);
}
}
@Test
public void testAccountInfo() throws ApiException {
AccountInfoResponse accountInfo = linodeApi.getAccountInfo();
assertFalse(accountInfo.hasErrors());
}
}
| ["\"LINODE_API_KEY\""] | [] | ["LINODE_API_KEY"] | [] | ["LINODE_API_KEY"] | java | 1 | 0 | |
tutorial/iris/socket/main.go
|
package main
import (
"context"
"fmt"
"log"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/websocket"
)
// Drawback: if the client sends messages too fast, the server can lose data
func main() {
app := iris.New()
log.SetFlags(log.Lshortfile | log.LstdFlags)
clients := make(map[string]*websocket.Conn)
app.Get("/", func(ctx iris.Context) {
ctx.Text("Hell Index Page")
})
app.Get("/client", func(ctx iris.Context) {
//log.Println(os.Getenv("PWD"))
ctx.ServeFile("./socket.html", false)
})
app.Get("/info", func(ctx iris.Context) {
data := make(map[string]interface{})
data["url"] = "info"
data["clients"] = clients
ctx.JSON(data)
})
// set up the websocket namespaces and events
serverEvents := websocket.Namespaces{
"default": websocket.Events{
websocket.OnNamespaceConnected: func(nsConn *websocket.NSConn, msg websocket.Message) error {
// with `websocket.GetContext` you can retrieve the Iris' `Context`.
ctx := websocket.GetContext(nsConn.Conn)
log.Printf("[%s] 连接成功 [%s-%s] with IP [%s]",
nsConn, msg.Namespace, nsConn.Conn.ID(),
ctx.RemoteAddr())
return nil
},
websocket.OnNamespaceDisconnect: func(nsConn *websocket.NSConn, msg websocket.Message) error {
log.Printf("[%s] 断开连接 [%s-%s]", nsConn, msg.Namespace, nsConn.Conn.ID())
return nil
},
"message": func(nsConn *websocket.NSConn, msg websocket.Message) error {
ctx := websocket.GetContext(nsConn.Conn)
log.Printf("收到message消息: %s from [%s]-[%s]", msg.Body, nsConn.Conn.ID(), ctx.RemoteAddr())
//log.Println(msg)
nsConn.Conn.Server().Broadcast(nsConn, msg)
data := []byte(fmt.Sprintf("我收到了消息:%s", msg.Body))
nsConn.Ask(context.Background(), "message", data)
//nsConn.Conn.Write(msg)
return nil
},
"createJob": func(nsConn *websocket.NSConn, msg websocket.Message) error {
log.Println("创建job事件")
ctx := websocket.GetContext(nsConn.Conn)
log.Printf("收到createJob消息: %s(%s) from [%s]-[%s]", msg.Body, msg.Event, nsConn.Conn.ID(), ctx.RemoteAddr())
//log.Println(msg)
data := []byte(fmt.Sprintf("我收到了消息:%s", msg.Body))
nsConn.Ask(context.Background(), "createJob", data)
nsConn.Conn.Write(msg)
nsConn.Conn.Server().Broadcast(nsConn, msg)
return nil
},
},
}
//ws := websocket.New(websocket.DefaultGobwasUpgrader, websocket.Events{
// websocket.OnNativeMessage: func(nsConn *websocket.NSConn, msg websocket.Message) error {
// ctx := websocket.GetContext(nsConn.Conn)
// log.Printf("收到消息: %s from [%s]-[%s]", msg.Body, nsConn.Conn.ID(), ctx.RemoteAddr())
// //log.Println(msg)
// nsConn.Conn.Server().Broadcast(nsConn, msg)
// return nil
// },
//})
ws := websocket.New(websocket.DefaultGobwasUpgrader, serverEvents)
ws.OnConnect = func(c *websocket.Conn) error {
log.Println("收到连接:", c.ID())
clients[c.ID()] = c
data := []byte(c.ID())
msg := websocket.Message{
Event: "message",
Body: data,
}
//c.Ask(context.Background(), msg)
c.Write(msg)
return nil
}
ws.OnDisconnect = func(c *websocket.Conn) {
log.Println("断开连接:", c.ID())
}
app.Get("/ws", websocket.Handler(ws))
app.Run(iris.Addr(":9000"))
}
| ["\"PWD\""] | [] | ["PWD"] | [] | ["PWD"] | go | 1 | 0 | |
feapder/utils/tools.py
|
# -*- coding: utf-8 -*-
"""
Created on 2018-09-06 14:21
---------
@summary: utilities
---------
@author: Boris
@email: [email protected]
"""
import asyncio
import calendar
import codecs
import configparser # for reading config files
import datetime
import functools
import hashlib
import html
import json
import os
import pickle
import random
import re
import socket
import ssl
import string
import sys
import time
import traceback
import urllib
import urllib.parse
import uuid
import weakref
from functools import partial, wraps
from hashlib import md5
from pprint import pformat
from pprint import pprint
from urllib import request
from urllib.parse import urljoin
import execjs # pip install PyExecJS
import redis
import requests
import six
from requests.cookies import RequestsCookieJar
from w3lib.url import canonicalize_url as _canonicalize_url
import feapder.setting as setting
from feapder.utils.email_sender import EmailSender
from feapder.utils.log import log
os.environ["EXECJS_RUNTIME"] = "Node" # 设置使用node执行js
# 全局取消ssl证书验证
ssl._create_default_https_context = ssl._create_unverified_context
TIME_OUT = 30
TIMER_TIME = 5
redisdb = None
def get_redisdb():
global redisdb
if not redisdb:
ip, port = setting.REDISDB_IP_PORTS.split(":")
redisdb = redis.Redis(
host=ip,
port=port,
db=setting.REDISDB_DB,
password=setting.REDISDB_USER_PASS,
decode_responses=True,
) # redis default port is 6379
return redisdb
# decorators
class Singleton(object):
def __init__(self, cls):
self._cls = cls
self._instance = {}
def __call__(self, *args, **kwargs):
if self._cls not in self._instance:
self._instance[self._cls] = self._cls(*args, **kwargs)
return self._instance[self._cls]
def log_function_time(func):
try:
@functools.wraps(func) # copy the wrapped function's metadata onto the wrapper
def calculate_time(*args, **kw):
began_time = time.time()
callfunc = func(*args, **kw)
end_time = time.time()
log.debug(func.__name__ + " run time = " + str(end_time - began_time))
return callfunc
return calculate_time
except:
log.debug("求取时间无效 因为函数参数不符")
return func
def run_safe_model(module_name):
def inner_run_safe_model(func):
try:
@functools.wraps(func) # copy the wrapped function's metadata onto the wrapper
def run_func(*args, **kw):
callfunc = None
try:
callfunc = func(*args, **kw)
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return callfunc
return run_func
except Exception as e:
log.error(module_name + ": " + func.__name__ + " - " + str(e))
traceback.print_exc()
return func
return inner_run_safe_model
def memoizemethod_noargs(method):
"""Decorator to cache the result of a method (without arguments) using a
weak reference to its object
"""
cache = weakref.WeakKeyDictionary()
@functools.wraps(method)
def new_method(self, *args, **kwargs):
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
return new_method
######################## [HTML parsing helpers] ###############################
# @log_function_time
def get_html_by_requests(
url, headers=None, code="utf-8", data=None, proxies={}, with_response=False
):
html = ""
r = None
try:
if data:
r = requests.post(
url, headers=headers, timeout=TIME_OUT, data=data, proxies=proxies
)
else:
r = requests.get(url, headers=headers, timeout=TIME_OUT, proxies=proxies)
if code:
r.encoding = code
html = r.text
except Exception as e:
log.error(e)
finally:
r and r.close()
if with_response:
return html, r
else:
return html
def get_json_by_requests(
url,
params=None,
headers=None,
data=None,
proxies={},
with_response=False,
cookies=None,
):
json = {}
response = None
try:
# response = requests.get(url, params = params)
if data:
response = requests.post(
url,
headers=headers,
data=data,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
else:
response = requests.get(
url,
headers=headers,
params=params,
timeout=TIME_OUT,
proxies=proxies,
cookies=cookies,
)
response.encoding = "utf-8"
json = response.json()
except Exception as e:
log.error(e)
finally:
response and response.close()
if with_response:
return json, response
else:
return json
def get_cookies(response):
cookies = requests.utils.dict_from_cookiejar(response.cookies)
return cookies
def get_cookies_from_str(cookie_str):
"""
>>> get_cookies_from_str("key=value; key2=value2; key3=; key4=; ")
{'key': 'value', 'key2': 'value2', 'key3': '', 'key4': ''}
Args:
cookie_str: key=value; key2=value2; key3=; key4=
Returns:
"""
cookies = {}
for cookie in cookie_str.split(";"):
cookie = cookie.strip()
if not cookie:
continue
key, value = cookie.split("=", 1)
key = key.strip()
value = value.strip()
cookies[key] = value
return cookies
def get_cookies_jar(cookies):
"""
@summary: convert cookies produced by selenium into cookies usable by requests
requests.get(xxx, cookies=jar)
reference: https://www.cnblogs.com/small-bud/p/9064674.html
---------
@param cookies: [{},{}]
---------
@result: cookie jar
"""
cookie_jar = RequestsCookieJar()
for cookie in cookies:
cookie_jar.set(cookie["name"], cookie["value"])
return cookie_jar
def get_cookies_from_selenium_cookie(cookies):
"""
@summary: convert cookies produced by selenium into cookies usable by requests
requests.get(xxx, cookies=jar)
reference: https://www.cnblogs.com/small-bud/p/9064674.html
---------
@param cookies: [{},{}]
---------
@result: cookie jar
"""
cookie_dict = {}
for cookie in cookies:
if cookie.get("name"):
cookie_dict[cookie["name"]] = cookie["value"]
return cookie_dict
def cookiesjar2str(cookies):
str_cookie = ""
for k, v in requests.utils.dict_from_cookiejar(cookies).items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def cookies2str(cookies):
str_cookie = ""
for k, v in cookies.items():
str_cookie += k
str_cookie += "="
str_cookie += v
str_cookie += "; "
return str_cookie
def get_urls(
html,
stop_urls=(
"javascript",
"+",
".css",
".js",
".rar",
".xls",
".exe",
".apk",
".doc",
".jpg",
".png",
".flv",
".mp4",
),
):
# skip urls containing javascript, +, # and the like
regex = r'<a.*?href.*?=.*?["|\'](.*?)["|\']'
urls = get_info(html, regex)
urls = sorted(set(urls), key=urls.index)
if stop_urls:
stop_urls = isinstance(stop_urls, str) and [stop_urls] or stop_urls
use_urls = []
for url in urls:
for stop_url in stop_urls:
if stop_url in url:
break
else:
use_urls.append(url)
urls = use_urls
return urls
def get_full_url(root_url, sub_url):
"""
@summary: build the full url
---------
@param root_url: base url (the page's url)
@param sub_url: relative url (can be joined with the base url to form the full url)
---------
@result: the full url
"""
return urljoin(root_url, sub_url)
def joint_url(url, params):
# param_str = "?"
# for key, value in params.items():
# value = isinstance(value, str) and value or str(value)
# param_str += key + "=" + value + "&"
#
# return url + param_str[:-1]
if not params:
return url
params = urlencode(params)
separator = "?" if "?" not in url else "&"
return url + separator + params
def canonicalize_url(url):
"""
normalize a url: sort the query parameters and drop the fragment
"""
return _canonicalize_url(url)
def get_url_md5(url):
url = canonicalize_url(url)
url = re.sub("^http://", "https://", url)
return get_md5(url)
def fit_url(urls, identis):
identis = isinstance(identis, str) and [identis] or identis
fit_urls = []
for link in urls:
for identi in identis:
if identi in link:
fit_urls.append(link)
return list(set(fit_urls))
def get_param(url, key):
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
if key == key_value[0]:
return key_value[1]
return None
def urlencode(params):
"""
convert dict-style parameters to a query string
@param params:
{
'a': 1,
'b': 2
}
@return: a=1&b=2
"""
return urllib.parse.urlencode(params)
def urldecode(url):
"""
parse a url's query string into a dict
@param url: xxx?a=1&b=2
@return:
{
'a': 1,
'b': 2
}
"""
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key, value = param.split("=")
params_json[key] = unquote_url(value)
return params_json
def unquote_url(url, encoding="utf-8"):
"""
@summary: decode (unquote) a url
---------
@param url:
---------
@result:
"""
return urllib.parse.unquote(url, encoding=encoding)
def quote_url(url, encoding="utf-8"):
"""
@summary: encode (quote) a url, see http://www.w3school.com.cn/tags/html_ref_urlencode.html
---------
@param url:
---------
@result:
"""
return urllib.parse.quote(url, safe="%;/?:@&=+$,", encoding=encoding)
def quote_chinese_word(text, encoding="utf-8"):
def quote_chinese_word_func(text):
chinese_word = text.group(0)
return urllib.parse.quote(chinese_word, encoding=encoding)
return re.sub("([\u4e00-\u9fa5]+)", quote_chinese_word_func, text, flags=re.S)
def unescape(str):
"""
unescape html entities
"""
return html.unescape(str)
def excape(str):
"""
escape html
"""
return html.escape(str)
_regexs = {}
# @log_function_time
def get_info(html, regexs, allow_repeat=True, fetch_one=False, split=None):
regexs = isinstance(regexs, str) and [regexs] or regexs
infos = []
for regex in regexs:
if regex == "":
continue
if regex not in _regexs.keys():
_regexs[regex] = re.compile(regex, re.S)
if fetch_one:
infos = _regexs[regex].search(html)
if infos:
infos = infos.groups()
else:
continue
else:
infos = _regexs[regex].findall(str(html))
if len(infos) > 0:
# print(regex)
break
if fetch_one:
infos = infos if infos else ("",)
return infos if len(infos) > 1 else infos[0]
else:
infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
infos = split.join(infos) if split else infos
return infos
def table_json(table, save_one_blank=True):
"""
convert a table to json; suited to tables where each row holds key:value pairs
@param table: a selector-wrapped element that supports xpath
@param save_one_blank: keep a single whitespace character
@return:
"""
data = {}
trs = table.xpath(".//tr")
for tr in trs:
tds = tr.xpath("./td|./th")
for i in range(0, len(tds), 2):
if i + 1 > len(tds) - 1:
break
key = tds[i].xpath("string(.)").extract_first(default="").strip()
value = tds[i + 1].xpath("string(.)").extract_first(default="").strip()
value = replace_str(value, "[\f\n\r\t\v]", "")
value = replace_str(value, " +", " " if save_one_blank else "")
if key:
data[key] = value
return data
def get_table_row_data(table):
"""
get the data of every row in a table
@param table: a selector-wrapped element that supports xpath
@return: [[],[]..]
"""
datas = []
rows = table.xpath(".//tr")
for row in rows:
cols = row.xpath("./td|./th")
row_datas = []
for col in cols:
data = col.xpath("string(.)").extract_first(default="").strip()
row_datas.append(data)
datas.append(row_datas)
return datas
def rows2json(rows, keys=None):
"""
convert row data to json
@param rows: the data of each row
@param keys: the json keys; when empty, the first row of rows is used as the keys
@return:
"""
data_start_pos = 0 if keys else 1
datas = []
keys = keys or rows[0]
for values in rows[data_start_pos:]:
datas.append(dict(zip(keys, values)))
return datas
def get_form_data(form):
"""
extract the data submitted by a form
:param form: a selector-wrapped element that supports xpath
:return:
"""
data = {}
inputs = form.xpath(".//input")
for input in inputs:
name = input.xpath("./@name").extract_first()
value = input.xpath("./@value").extract_first()
if name:
data[name] = value
return data
# does not work well on mac
# def get_domain(url):
# domain = ''
# try:
# domain = get_tld(url)
# except Exception as e:
# log.debug(e)
# return domain
def get_domain(url):
proto, rest = urllib.parse.splittype(url)
domain, rest = urllib.parse.splithost(rest)
return domain
def get_index_url(url):
return "/".join(url.split("/")[:3])
def get_ip(domain):
ip = socket.getaddrinfo(domain, "http")[0][4][0]
return ip
def get_localhost_ip():
"""
implemented with UDP: build a UDP packet that carries this host's IP in its header, then read the local IP back from that packet.
no packet is actually sent out, so it will not show up in a packet-capture tool
:return:
"""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
finally:
if s:
s.close()
return ip
def ip_to_num(ip):
import struct
ip_num = socket.ntohl(struct.unpack("I", socket.inet_aton(str(ip)))[0])
return ip_num
def is_valid_proxy(proxy, check_url=None):
"""
check whether a proxy is usable
@param proxy: xxx.xxx.xxx:xxx
@param check_url: url of a target site to check against. Defaults to None, in which case only a socket connection to the proxy is checked, which cannot rule out "Connection closed by foreign host"
@return: True / False
"""
is_valid = False
if check_url:
proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
response = None
try:
response = requests.get(
check_url, headers=headers, proxies=proxies, stream=True, timeout=20
)
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}".format(e, proxy))
finally:
if response:
response.close()
else:
ip, port = proxy.split(":")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sk:
sk.settimeout(7)
try:
sk.connect((ip, int(port))) # check whether the proxy server is up
is_valid = True
except Exception as e:
log.error("check proxy failed: {} {}:{}".format(e, ip, port))
return is_valid
def is_valid_url(url):
"""
验证url是否合法
:param url:
:return:
"""
if re.match(r"(^https?:/{2}\w.+$)|(ftp://)", url):
return True
else:
return False
def get_text(soup, *args):
try:
return soup.get_text()
except Exception as e:
log.error(e)
return ""
def del_html_tag(content, except_line_break=False, save_img=False, white_replaced=""):
"""
删除html标签
@param content: html内容
@param except_line_break: 保留p标签
@param save_img: 保留图片
@param white_replaced: 空白符替换
@return:
"""
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
content = replace_str(
content, "(?!&[a-z]+=)&[a-z]+;?"
    )  # strip leftover html entities such as &nbsp;, but keep &xxx= style parameter markers
if except_line_break:
content = content.replace("</p>", "/p")
content = replace_str(content, "<[^p].*?>")
content = content.replace("/p", "</p>")
content = replace_str(content, "[ \f\r\t\v]")
elif save_img:
content = replace_str(content, "(?!<img.+?>)<.+?>") # 替换掉除图片外的其他标签
content = replace_str(content, "(?! +)\s+", "\n") # 保留空格
content = content.strip()
else:
content = replace_str(content, "<(.|\n)*?>")
content = replace_str(content, "\s", white_replaced)
content = content.strip()
return content
def del_html_js_css(content):
content = replace_str(content, "(?i)<script(.|\n)*?</script>") # (?)忽略大小写
content = replace_str(content, "(?i)<style(.|\n)*?</style>")
content = replace_str(content, "<!--(.|\n)*?-->")
return content
def is_have_chinese(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word and True or False
def is_have_english(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words and True or False
def get_chinese_word(content):
regex = "[\u4e00-\u9fa5]+"
chinese_word = get_info(content, regex)
return chinese_word
def get_english_words(content):
regex = "[a-zA-Z]+"
english_words = get_info(content, regex)
return english_words or ""
##################################################
def get_json(json_str):
"""
@summary: 取json对象
---------
@param json_str: json格式的字符串
---------
@result: 返回json对象
"""
try:
return json.loads(json_str) if json_str else {}
except Exception as e1:
try:
json_str = json_str.strip()
json_str = json_str.replace("'", '"')
keys = get_info(json_str, "(\w+):")
for key in keys:
json_str = json_str.replace(key, '"%s"' % key)
return json.loads(json_str) if json_str else {}
except Exception as e2:
log.error(
"""
e1: %s
format json_str: %s
e2: %s
"""
% (e1, json_str, e2)
)
return {}
def jsonp2json(jsonp):
"""
将jsonp转为json
@param jsonp: jQuery172013600082560040794_1553230569815({})
@return:
"""
try:
return json.loads(re.match(".*?({.*}).*", jsonp, re.S).group(1))
except:
raise ValueError("Invalid Input")
def dumps_json(json_, indent=4, sort_keys=False):
"""
@summary: 格式化json 用于打印
---------
@param json_: json格式的字符串或json对象
---------
@result: 格式化后的字符串
"""
try:
if isinstance(json_, str):
json_ = get_json(json_)
json_ = json.dumps(
json_, ensure_ascii=False, indent=indent, skipkeys=True, sort_keys=sort_keys
)
except Exception as e:
log.error(e)
json_ = pformat(json_)
return json_
def get_json_value(json_object, key):
"""
@summary:
---------
@param json_object: json对象或json格式的字符串
@param key: 建值 如果在多个层级目录下 可写 key1.key2 如{'key1':{'key2':3}}
---------
@result: 返回对应的值,如果没有,返回''
"""
current_key = ""
value = ""
try:
json_object = (
isinstance(json_object, str) and get_json(json_object) or json_object
)
current_key = key.split(".")[0]
value = json_object[current_key]
key = key[key.find(".") + 1 :]
except Exception as e:
return value
if key == current_key:
return value
else:
return get_json_value(value, key)
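# Illustrative usage of get_json_value (a minimal sketch with made-up data):
#   get_json_value({"key1": {"key2": 3}}, "key1.key2")   # -> 3
#   get_json_value('{"a": {"b": "c"}}', "a.b")            # -> 'c'
#   get_json_value({"a": 1}, "missing")                   # -> ''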
def get_all_keys(datas, depth=None, current_depth=0):
"""
@summary: 获取json李所有的key
---------
@param datas: dict / list
@param depth: 字典key的层级 默认不限制层级 层级从1开始
@param current_depth: 字典key的当前层级 不用传参
---------
@result: 返回json所有的key
"""
keys = []
if depth and current_depth >= depth:
return keys
if isinstance(datas, list):
for data in datas:
keys.extend(get_all_keys(data, depth, current_depth=current_depth + 1))
elif isinstance(datas, dict):
for key, value in datas.items():
keys.append(key)
if isinstance(value, dict):
keys.extend(get_all_keys(value, depth, current_depth=current_depth + 1))
return keys
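# Illustrative usage of get_all_keys (a minimal sketch with made-up data):
#   get_all_keys({"a": 1, "b": {"c": 2}})        # -> ['a', 'b', 'c']
#   get_all_keys([{"x": 1}, {"y": {"z": 2}}])    # -> ['x', 'y', 'z']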
def to_chinese(unicode_str):
format_str = json.loads('{"chinese":"%s"}' % unicode_str)
return format_str["chinese"]
##################################################
def replace_str(source_str, regex, replace_str=""):
"""
@summary: 替换字符串
---------
@param source_str: 原字符串
@param regex: 正则
@param replace_str: 用什么来替换 默认为''
---------
@result: 返回替换后的字符串
"""
str_info = re.compile(regex)
return str_info.sub(replace_str, source_str)
def del_redundant_blank_character(text):
"""
删除冗余的空白符, 只保留一个
:param text:
:return:
"""
return re.sub("\s+", " ", text)
##################################################
def get_conf_value(config_file, section, key):
cp = configparser.ConfigParser(allow_no_value=True)
with codecs.open(config_file, "r", encoding="utf-8") as f:
cp.read_file(f)
return cp.get(section, key)
def mkdir(path):
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError as exc: # Python >2.5
pass
def write_file(filename, content, mode="w", encoding="utf-8"):
"""
@summary: 写文件
---------
@param filename: 文件名(有路径)
@param content: 内容
@param mode: 模式 w/w+ (覆盖/追加)
---------
@result:
"""
directory = os.path.dirname(filename)
mkdir(directory)
with open(filename, mode, encoding=encoding) as file:
file.writelines(content)
def read_file(filename, readlines=False, encoding="utf-8"):
"""
@summary: 读文件
---------
@param filename: 文件名(有路径)
@param readlines: 按行读取 (默认False)
---------
@result: 按行读取返回List,否则返回字符串
"""
content = None
try:
with open(filename, "r", encoding=encoding) as file:
content = file.readlines() if readlines else file.read()
except Exception as e:
log.error(e)
return content
def get_oss_file_list(oss_handler, prefix, date_range_min, date_range_max=None):
"""
获取文件列表
@param prefix: 路径前缀 如 data/car_service_line/yiche/yiche_serial_zongshu_info
@param date_range_min: 时间范围 最小值 日期分隔符为/ 如 2019/03/01 或 2019/03/01/00/00/00
@param date_range_max: 时间范围 最大值 日期分隔符为/ 如 2019/03/01 或 2019/03/01/00/00/00
@return: 每个文件路径 如 html/e_commerce_service_line/alibaba/alibaba_shop_info/2019/03/22/15/53/15/8ca8b9e4-4c77-11e9-9dee-acde48001122.json.snappy
"""
# 计算时间范围
date_range_max = date_range_max or date_range_min
date_format = "/".join(
["%Y", "%m", "%d", "%H", "%M", "%S"][: date_range_min.count("/") + 1]
)
time_interval = [
{"days": 365},
{"days": 31},
{"days": 1},
{"hours": 1},
{"minutes": 1},
{"seconds": 1},
][date_range_min.count("/")]
date_range = get_between_date(
date_range_min, date_range_max, date_format=date_format, **time_interval
)
for date in date_range:
file_folder_path = os.path.join(prefix, date)
objs = oss_handler.list(prefix=file_folder_path)
for obj in objs:
filename = obj.key
yield filename
def is_html(url):
if not url:
return False
try:
content_type = request.urlopen(url).info().get("Content-Type", "")
if "text/html" in content_type:
return True
else:
return False
except Exception as e:
log.error(e)
return False
def is_exist(file_path):
"""
@summary: 文件是否存在
---------
@param file_path:
---------
@result:
"""
return os.path.exists(file_path)
def download_file(url, file_path, *, call_func=None, proxies=None, data=None):
"""
下载文件,会自动创建文件存储目录
Args:
url: 地址
file_path: 文件存储地址
call_func: 下载成功的回调
proxies: 代理
data: 请求体
Returns:
"""
directory = os.path.dirname(file_path)
mkdir(directory)
    # progress bar
    def progress_callfunc(blocknum, blocksize, totalsize):
        """progress callback
        @blocknum : number of blocks downloaded so far
        @blocksize : size of each block
        @totalsize: size of the remote file
        """
        percent = 100.0 * blocknum * blocksize / totalsize
        if percent > 100:
            percent = 100
        # print ('progress %.2f%%' % percent, end = '\r')
        sys.stdout.write("progress %.2f%%" % percent + "\r")
sys.stdout.flush()
if url:
try:
if proxies:
# create the object, assign it to a variable
proxy = request.ProxyHandler(proxies)
# construct a new opener using your proxy settings
opener = request.build_opener(proxy)
                # install the opener at the module level
request.install_opener(opener)
request.urlretrieve(url, file_path, progress_callfunc, data)
if callable(call_func):
call_func()
return 1
except Exception as e:
log.error(e)
return 0
else:
return 0
def get_file_list(path, ignore=[]):
templist = path.split("*")
path = templist[0]
file_type = templist[1] if len(templist) >= 2 else ""
    # walk files recursively
    def get_file_list_(path, file_type, ignore, all_file=None):
        all_file = [] if all_file is None else all_file  # avoid the shared mutable default bug
        file_list = os.listdir(path)
for file_name in file_list:
if file_name in ignore:
continue
file_path = os.path.join(path, file_name)
if os.path.isdir(file_path):
get_file_list_(file_path, file_type, ignore, all_file)
else:
if not file_type or file_name.endswith(file_type):
all_file.append(file_path)
return all_file
return get_file_list_(path, file_type, ignore) if os.path.isdir(path) else [path]
def rename_file(old_name, new_name):
os.rename(old_name, new_name)
def del_file(path, ignore=()):
files = get_file_list(path, ignore)
for file in files:
try:
os.remove(file)
except Exception as e:
log.error(
"""
                delete failed: %s
Exception : %s
"""
% (file, str(e))
)
finally:
pass
def get_file_type(file_name):
"""
@summary: 取文件后缀名
---------
@param file_name:
---------
@result:
"""
try:
return os.path.splitext(file_name)[1]
except Exception as e:
log.exception(e)
def get_file_path(file_path):
"""
@summary: 取文件路径
---------
@param file_path: /root/a.py
---------
@result: /root
"""
try:
return os.path.split(file_path)[0]
except Exception as e:
log.exception(e)
#############################################
def exec_js(js_code):
"""
@summary: 执行js代码
---------
@param js_code: js代码
---------
@result: 返回执行结果
"""
return execjs.eval(js_code)
def compile_js(js_func):
"""
@summary: 编译js函数
---------
@param js_func:js函数
---------
@result: 返回函数对象 调用 fun('js_funName', param1,param2)
"""
ctx = execjs.compile(js_func)
return ctx.call
###############################################
#############################################
def date_to_timestamp(date, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
@param date:将"2011-09-28 10:00:00"时间格式转化为时间戳
@param format:时间格式
---------
@result: 返回时间戳
"""
timestamp = time.mktime(time.strptime(date, time_format))
return int(timestamp)
def timestamp_to_date(timestamp, time_format="%Y-%m-%d %H:%M:%S"):
"""
@summary:
---------
@param timestamp: 将时间戳转化为日期
@param format: 日期格式
---------
@result: 返回日期
"""
if timestamp is None:
raise ValueError("timestamp is null")
date = time.localtime(timestamp)
return time.strftime(time_format, date)
def get_current_timestamp():
return int(time.time())
def get_current_date(date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.now().strftime(date_format)
# return time.strftime(date_format, time.localtime(time.time()))
def get_date_number(year=None, month=None, day=None):
"""
@summary: 获取指定日期对应的日期数
默认当前周
---------
@param year: 2010
@param month: 6
@param day: 16
---------
@result: (年号,第几周,第几天) 如 (2010, 24, 3)
"""
if year and month and day:
return datetime.date(year, month, day).isocalendar()
elif not any([year, month, day]):
return datetime.datetime.now().isocalendar()
else:
assert year, "year 不能为空"
assert month, "month 不能为空"
assert day, "day 不能为空"
def get_between_date(
begin_date, end_date=None, date_format="%Y-%m-%d", **time_interval
):
"""
@summary: 获取一段时间间隔内的日期,默认为每一天
---------
@param begin_date: 开始日期 str 如 2018-10-01
@param end_date: 默认为今日
@param date_format: 日期格式,应与begin_date的日期格式相对应
@param time_interval: 时间间隔 默认一天 支持 days、seconds、microseconds、milliseconds、minutes、hours、weeks
---------
@result: list 值为字符串
"""
date_list = []
begin_date = datetime.datetime.strptime(begin_date, date_format)
end_date = (
datetime.datetime.strptime(end_date, date_format)
if end_date
else datetime.datetime.strptime(
time.strftime(date_format, time.localtime(time.time())), date_format
)
)
time_interval = time_interval or dict(days=1)
while begin_date <= end_date:
date_str = begin_date.strftime(date_format)
date_list.append(date_str)
begin_date += datetime.timedelta(**time_interval)
if end_date.strftime(date_format) not in date_list:
date_list.append(end_date.strftime(date_format))
return date_list
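# Illustrative usage of get_between_date (a minimal sketch with made-up dates):
#   get_between_date("2018-10-01", "2018-10-03")
#   # -> ['2018-10-01', '2018-10-02', '2018-10-03']
#   get_between_date("2019/03/01/00", "2019/03/01/02", date_format="%Y/%m/%d/%H", hours=1)
#   # -> ['2019/03/01/00', '2019/03/01/01', '2019/03/01/02']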
def get_between_months(begin_date, end_date=None):
"""
@summary: 获取一段时间间隔内的月份
需要满一整月
---------
@param begin_date: 开始时间 如 2018-01-01
@param end_date: 默认当前时间
---------
@result: 列表 如 ['2018-01', '2018-02']
"""
def add_months(dt, months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year=year, month=month, day=day)
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = (
datetime.datetime.strptime(end_date, "%Y-%m-%d")
if end_date
else datetime.datetime.strptime(
time.strftime("%Y-%m-%d", time.localtime(time.time())), "%Y-%m-%d"
)
)
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m")
date_list.append(date_str)
begin_date = add_months(begin_date, 1)
return date_list
def get_today_of_day(day_offset=0):
return str(datetime.date.today() + datetime.timedelta(days=day_offset))
def get_days_of_month(year, month):
"""
返回天数
"""
return calendar.monthrange(year, month)[1]
def get_firstday_of_month(date):
"""''
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = "01"
if int(month) < 10:
month = "0" + str(int(month))
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_lastday_of_month(date):
"""''
get the last day of month
date format = "YYYY-MM-DD"
"""
year, month, day = date.split("-")
year, month, day = int(year), int(month), int(day)
days = calendar.monthrange(year, month)[1]
month = add_zero(month)
arr = (year, month, days)
return "-".join("%s" % i for i in arr)
def get_firstday_month(month_offset=0):
"""''
get the first day of month from today
month_offset is how many months
"""
(y, m, d) = get_year_month_and_days(month_offset)
d = "01"
arr = (y, m, d)
return "-".join("%s" % i for i in arr)
def get_lastday_month(month_offset=0):
"""''
get the last day of month from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset))
def get_last_month(month_offset=0):
"""''
get the last day of month from today
month_offset is how many months
"""
return "-".join("%s" % i for i in get_year_month_and_days(month_offset)[:2])
def get_year_month_and_days(month_offset=0):
"""
@summary:
---------
@param month_offset: 月份偏移量
---------
@result: ('2019', '04', '30')
"""
today = datetime.datetime.now()
year, month = today.year, today.month
this_year = int(year)
this_month = int(month)
total_month = this_month + month_offset
if month_offset >= 0:
if total_month <= 12:
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
else:
if (total_month > 0) and (total_month < 12):
days = str(get_days_of_month(this_year, total_month))
total_month = add_zero(total_month)
return (year, total_month, days)
else:
i = total_month // 12
j = total_month % 12
if j == 0:
i -= 1
j = 12
this_year += i
days = str(get_days_of_month(this_year, j))
j = add_zero(j)
return (str(this_year), str(j), days)
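# Illustrative usage of get_year_month_and_days (a minimal sketch; actual values depend on the current date):
#   get_year_month_and_days()    # this month, e.g. (2019, '04', '30')
#   get_year_month_and_days(-1)  # previous month, e.g. (2019, '03', '31')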
def add_zero(n):
return "%02d" % n
def get_month(month_offset=0):
"""''
获取当前日期前后N月的日期
if month_offset>0, 获取当前日期前N月的日期
if month_offset<0, 获取当前日期后N月的日期
date format = "YYYY-MM-DD"
"""
today = datetime.datetime.now()
day = add_zero(today.day)
(y, m, d) = get_year_month_and_days(month_offset)
arr = (y, m, d)
if int(day) < int(d):
arr = (y, m, day)
return "-".join("%s" % i for i in arr)
@run_safe_model("format_date")
def format_date(date, old_format="", new_format="%Y-%m-%d %H:%M:%S"):
"""
@summary: 格式化日期格式
---------
@param date: 日期 eg:2017年4月17日 3时27分12秒
@param old_format: 原来的日期格式 如 '%Y年%m月%d日 %H时%M分%S秒'
%y 两位数的年份表示(00-99)
%Y 四位数的年份表示(000-9999)
%m 月份(01-12)
%d 月内中的一天(0-31)
%H 24小时制小时数(0-23)
%I 12小时制小时数(01-12)
%M 分钟数(00-59)
%S 秒(00-59)
@param new_format: 输出的日期格式
---------
@result: 格式化后的日期,类型为字符串 如2017-4-17 03:27:12
"""
if not date:
return ""
if not old_format:
regex = "(\d+)"
numbers = get_info(date, regex, allow_repeat=True)
formats = ["%Y", "%m", "%d", "%H", "%M", "%S"]
old_format = date
for i, number in enumerate(numbers[:6]):
            if i == 0 and len(number) == 2:  # the year may be two digits, so use lowercase %y
                old_format = old_format.replace(
                    number, formats[i].lower(), 1
                )  # replace only once; for '2017年11月30日 11:49' this keeps month 11 from also matching hour 11
            else:
                old_format = old_format.replace(number, formats[i], 1)  # replace only once
try:
date_obj = datetime.datetime.strptime(date, old_format)
if "T" in date and "Z" in date:
date_obj += datetime.timedelta(hours=8)
date_str = date_obj.strftime("%Y-%m-%d %H:%M:%S")
else:
date_str = datetime.datetime.strftime(date_obj, new_format)
except Exception as e:
log.error("日期格式化出错,old_format = %s 不符合 %s 格式" % (old_format, date))
date_str = date
return date_str
def transform_lower_num(data_str: str):
num_map = {
"一": "1",
"二": "2",
"三": "3",
"四": "4",
"五": "5",
"六": "6",
"七": "7",
"八": "8",
"九": "9",
"十": "0",
}
pattern = f'[{"|".join(num_map.keys())}|零]'
res = re.search(pattern, data_str)
if not res:
        # no Chinese numerals in the string: return it unchanged
return data_str
data_str = data_str.replace("0", "零")
for n in num_map:
data_str = data_str.replace(n, num_map[n])
re_data_str = re.findall("\d+", data_str)
for i in re_data_str:
if len(i) == 3:
new_i = i.replace("0", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 4:
new_i = i.replace("10", "")
data_str = data_str.replace(i, new_i, 1)
elif len(i) == 2 and int(i) < 10:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
elif len(i) == 1 and int(i) == 0:
new_i = int(i) + 10
data_str = data_str.replace(i, str(new_i), 1)
return data_str.replace("零", "0")
@run_safe_model("format_time")
def format_time(release_time, date_format="%Y-%m-%d %H:%M:%S"):
"""
>>> format_time("2个月前")
'2021-08-15 16:24:21'
>>> format_time("2月前")
'2021-08-15 16:24:36'
"""
release_time = transform_lower_num(release_time)
release_time = release_time.replace("日", "天").replace("/", "-")
if "年前" in release_time:
years = re.compile("(\d+)\s*年前").findall(release_time)
years_ago = datetime.datetime.now() - datetime.timedelta(
days=int(years[0]) * 365
)
release_time = years_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "月前" in release_time:
months = re.compile("(\d+)[\s个]*月前").findall(release_time)
months_ago = datetime.datetime.now() - datetime.timedelta(
days=int(months[0]) * 30
)
release_time = months_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "周前" in release_time:
weeks = re.compile("(\d+)\s*周前").findall(release_time)
weeks_ago = datetime.datetime.now() - datetime.timedelta(days=int(weeks[0]) * 7)
release_time = weeks_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "天前" in release_time:
ndays = re.compile("(\d+)\s*天前").findall(release_time)
days_ago = datetime.datetime.now() - datetime.timedelta(days=int(ndays[0]))
release_time = days_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "小时前" in release_time:
nhours = re.compile("(\d+)\s*小时前").findall(release_time)
hours_ago = datetime.datetime.now() - datetime.timedelta(hours=int(nhours[0]))
release_time = hours_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "分钟前" in release_time:
nminutes = re.compile("(\d+)\s*分钟前").findall(release_time)
minutes_ago = datetime.datetime.now() - datetime.timedelta(
minutes=int(nminutes[0])
)
release_time = minutes_ago.strftime("%Y-%m-%d %H:%M:%S")
elif "前天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=2)
release_time = release_time.replace("前天", str(yesterday))
elif "昨天" in release_time:
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
release_time = release_time.replace("昨天", str(yesterday))
elif "今天" in release_time:
release_time = release_time.replace("今天", get_current_date("%Y-%m-%d"))
elif "刚刚" in release_time:
release_time = get_current_date()
elif re.search("^\d\d:\d\d", release_time):
release_time = get_current_date("%Y-%m-%d") + " " + release_time
elif not re.compile("\d{4}").findall(release_time):
month = re.compile("\d{1,2}").findall(release_time)
if month and int(month[0]) <= int(get_current_date("%m")):
release_time = get_current_date("%Y") + "-" + release_time
else:
release_time = str(int(get_current_date("%Y")) - 1) + "-" + release_time
    # split the day and the hour when they got glued together
template = re.compile("(\d{4}-\d{1,2}-\d{2})(\d{1,2})")
release_time = re.sub(template, r"\1 \2", release_time)
release_time = format_date(release_time, new_format=date_format)
return release_time
def to_date(date_str, date_format="%Y-%m-%d %H:%M:%S"):
return datetime.datetime.strptime(date_str, date_format)
def get_before_date(
current_date,
days,
current_date_format="%Y-%m-%d %H:%M:%S",
return_date_format="%Y-%m-%d %H:%M:%S",
):
"""
    @summary: get a time offset from the given one
    ---------
    @param current_date: the current time, str
    @param days: offset in days; -1 means the previous day, 1 the next day
    @param return_date_format: format of the returned time
    ---------
    @result: a string
"""
current_date = to_date(current_date, current_date_format)
date_obj = current_date + datetime.timedelta(days=days)
return datetime.datetime.strftime(date_obj, return_date_format)
def delay_time(sleep_time=60):
"""
    @summary: sleep, one minute by default
    ---------
    @param sleep_time: in seconds
---------
@result:
"""
time.sleep(sleep_time)
def format_seconds(seconds):
"""
    @summary: convert seconds to days/hours/minutes/seconds
    ---------
    @param seconds:
    ---------
    @result: e.g. 2天3小时2分49秒
    """
    seconds = int(seconds + 0.5)  # round to the nearest second
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = ""
if d:
times += "{}天".format(d)
if h:
times += "{}小时".format(h)
if m:
times += "{}分".format(m)
if s:
times += "{}秒".format(s)
return times
################################################
def get_md5(*args):
"""
    @summary: get a unique 32-character md5
    ---------
    @param *args: the values combined into the hash
---------
@result: 7c8684bcbdfcea6697650aa53d7b1405
"""
m = hashlib.md5()
for arg in args:
m.update(str(arg).encode())
return m.hexdigest()
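# Illustrative usage of get_md5 (a minimal sketch; the url/title arguments are made up):
#   get_md5("https://example.com", "page-title")  # -> a 32-char hex digest
#   get_md5(1, 2) == get_md5("1", "2")            # -> True, args are str()-ed before hashing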
def get_sha1(*args):
"""
    @summary: get a unique 40-character value, useful as a unique id
    ---------
    @param *args: the values combined into the hash
---------
@result: ba4868b3f277c8e387b55d9e3d0be7c045cdd89e
"""
sha1 = hashlib.sha1()
for arg in args:
sha1.update(str(arg).encode())
    return sha1.hexdigest()  # 40 hex chars
def get_base64(secret, message):
"""
    @summary: signature algorithm is "HMAC-SHA256"
              see: https://www.jokecamp.com/blog/examples-of-creating-base64-hashes-using-hmac-sha256-in-different-languages/
    ---------
    @param secret: the secret key
    @param message: the message
    ---------
    @result: the signature, encoded as "base64"
"""
import hashlib
import hmac
import base64
message = bytes(message, "utf-8")
secret = bytes(secret, "utf-8")
signature = base64.b64encode(
hmac.new(secret, message, digestmod=hashlib.sha256).digest()
).decode("utf8")
return signature
def get_uuid(key1="", key2=""):
"""
    @summary: compute a uuid
              useful for turning two strings into one unique value, e.g. domain + news title as a compound index
---------
@param key1:str
@param key2:str
---------
@result:
"""
uuid_object = ""
if not key1 and not key2:
uuid_object = uuid.uuid1()
else:
hash = md5(bytes(key1, "utf-8") + bytes(key2, "utf-8")).digest()
uuid_object = uuid.UUID(bytes=hash[:16], version=3)
return str(uuid_object)
def get_hash(text):
return hash(text)
##################################################
def cut_string(text, length):
"""
    @summary: split text into chunks of a given length
    ---------
    @param text: the text
    @param length: chunk length
    ---------
    @result: a list of the resulting chunks
"""
text_list = re.findall(".{%d}" % length, text, re.S)
leave_text = text[len(text_list) * length :]
if leave_text:
text_list.append(leave_text)
return text_list
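# Illustrative usage of cut_string (a minimal sketch with made-up input):
#   cut_string("abcdefg", 3)  # -> ['abc', 'def', 'g']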
def get_random_string(length=1):
random_string = "".join(random.sample(string.ascii_letters + string.digits, length))
return random_string
def get_random_password(length=8, special_characters=""):
"""
    @summary: create a random password, 8 characters by default, containing upper/lower case letters and digits
    ---------
    @param length: password length, 8 by default
    @param special_characters: special characters to mix in
    ---------
    @result: a password of the given length
"""
while True:
random_password = "".join(
random.sample(
string.ascii_letters + string.digits + special_characters, length
)
)
if (
re.search("[0-9]", random_password)
and re.search("[A-Z]", random_password)
and re.search("[a-z]", random_password)
):
if not special_characters:
break
elif set(random_password).intersection(special_characters):
break
return random_password
def get_random_email(length=None, email_types: list = None, special_characters=""):
"""
    Generate a random email address.
    :param length: length of the local part
    :param email_types: email domains to choose from
    :param special_characters: special characters to mix in
:return:
"""
if not length:
length = random.randint(4, 12)
if not email_types:
email_types = [
"qq.com",
"163.com",
"gmail.com",
"yahoo.com",
"hotmail.com",
"yeah.net",
"126.com",
"139.com",
"sohu.com",
]
email_body = get_random_password(length, special_characters)
email_type = random.choice(email_types)
email = email_body + "@" + email_type
return email
#################################
def dumps_obj(obj):
return pickle.dumps(obj)
def loads_obj(obj_str):
return pickle.loads(obj_str)
def get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
log.error("Method %r not found in: %s" % (name, obj))
return None
def witch_workspace(project_path):
"""
@summary:
---------
@param project_path:
---------
@result:
"""
    os.chdir(project_path)  # switch the working directory
############### database helpers #######################
def format_sql_value(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, (list, dict)):
value = dumps_json(value, indent=None)
elif isinstance(value, (datetime.date, datetime.time)):
value = str(value)
elif isinstance(value, bool):
value = int(value)
return value
def list2str(datas):
"""
    Convert a list to its SQL tuple string form.
:param datas: [1, 2]
:return: (1, 2)
"""
data_str = str(tuple(datas))
data_str = re.sub(",\)$", ")", data_str)
return data_str
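# Illustrative usage of list2str (a minimal sketch with made-up data):
#   list2str([1, 2])   # -> '(1, 2)'
#   list2str(["a"])    # -> "('a')"  - the trailing comma of a 1-tuple is stripped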
def make_insert_sql(
table, data, auto_update=False, update_columns=(), insert_ignore=False
):
"""
    @summary: for mysql; oracle would need its dates wrapped with to_date (TODO)
    ---------
    @param table:
    @param data: row data as a dict
    @param auto_update: use replace into, fully overwriting existing rows
    @param update_columns: columns to update (all by default); when set, auto_update is ignored and these columns are updated on a duplicate key
    @param insert_ignore: skip rows that already exist
---------
@result:
"""
keys = ["`{}`".format(key) for key in data.keys()]
keys = list2str(keys).replace("'", "")
values = [format_sql_value(value) for value in data.values()]
values = list2str(values)
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
update_columns_ = ", ".join(
["{key}=values({key})".format(key=key) for key in update_columns]
)
sql = (
"insert%s into `{table}` {keys} values {values} on duplicate key update %s"
% (" ignore" if insert_ignore else "", update_columns_)
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values}"
else:
sql = "insert%s into `{table}` {keys} values {values}" % (
" ignore" if insert_ignore else ""
)
sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
return sql
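# Illustrative usage of make_insert_sql (a minimal sketch; the table and column names are made up):
#   make_insert_sql("user", {"id": 1, "name": "tom"})
#   # -> "insert into `user` (`id`, `name`) values (1, 'tom')"
#   make_insert_sql("user", {"id": 1, "name": "tom"}, insert_ignore=True)
#   # -> "insert ignore into `user` (`id`, `name`) values (1, 'tom')"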
def make_update_sql(table, data, condition):
"""
    @summary: for mysql; oracle would need its dates wrapped with to_date (TODO)
    ---------
    @param table:
    @param data: row data as a dict
    @param condition: the where condition
---------
@result:
"""
key_values = []
for key, value in data.items():
value = format_sql_value(value)
if isinstance(value, str):
key_values.append("`{}`={}".format(key, repr(value)))
elif value is None:
key_values.append("`{}`={}".format(key, "null"))
else:
key_values.append("`{}`={}".format(key, value))
key_values = ", ".join(key_values)
sql = "update `{table}` set {key_values} where {condition}"
sql = sql.format(table=table, key_values=key_values, condition=condition)
return sql
def make_batch_sql(
table, datas, auto_update=False, update_columns=(), update_columns_value=()
):
"""
    @summary: build a batch sql statement
    ---------
    @param table:
    @param datas: row data, [{...}]
    @param auto_update: use replace into, fully overwriting existing rows
    @param update_columns: columns to update (all by default); when set, auto_update is ignored and these columns are updated on a duplicate key
    @param update_columns_value: values for the updated columns; defaults to the values in datas. Note: string values need explicit quotes, e.g. update_columns_value=("'test'",)
---------
@result:
"""
if not datas:
return
keys = list(datas[0].keys())
values_placeholder = ["%s"] * len(keys)
values = []
for data in datas:
value = []
for key in keys:
current_data = data.get(key)
current_data = format_sql_value(current_data)
value.append(current_data)
values.append(value)
keys = ["`{}`".format(key) for key in keys]
keys = list2str(keys).replace("'", "")
values_placeholder = list2str(values_placeholder).replace("'", "")
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
if update_columns_value:
update_columns_ = ", ".join(
[
"`{key}`={value}".format(key=key, value=value)
for key, value in zip(update_columns, update_columns_value)
]
)
else:
update_columns_ = ", ".join(
["`{key}`=values(`{key}`)".format(key=key) for key in update_columns]
)
sql = "insert into `{table}` {keys} values {values_placeholder} on duplicate key update {update_columns}".format(
table=table,
keys=keys,
values_placeholder=values_placeholder,
update_columns=update_columns_,
)
elif auto_update:
sql = "replace into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
else:
sql = "insert ignore into `{table}` {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
return sql, values
############### json helpers #######################
def key2underline(key: str, strict=True):
"""
>>> key2underline("HelloWord")
'hello_word'
>>> key2underline("SHData", strict=True)
's_h_data'
>>> key2underline("SHData", strict=False)
'sh_data'
>>> key2underline("SHDataHi", strict=False)
'sh_data_hi'
>>> key2underline("SHDataHi", strict=True)
's_h_data_hi'
>>> key2underline("dataHi", strict=True)
'data_hi'
"""
regex = "[A-Z]*" if not strict else "[A-Z]"
capitals = re.findall(regex, key)
if capitals:
for capital in capitals:
if not capital:
continue
if key.startswith(capital):
if len(capital) > 1:
key = key.replace(
capital, capital[:-1].lower() + "_" + capital[-1].lower(), 1
)
else:
key = key.replace(capital, capital.lower(), 1)
else:
if len(capital) > 1:
key = key.replace(capital, "_" + capital.lower() + "_", 1)
else:
key = key.replace(capital, "_" + capital.lower(), 1)
return key.strip("_")
def key2hump(key):
"""
    Convert an underscore_name to CamelCase.
"""
return key.title().replace("_", "")
def format_json_key(json_data):
json_data_correct = {}
for key, value in json_data.items():
key = key2underline(key)
json_data_correct[key] = value
return json_data_correct
def quick_to_json(text):
"""
    @summary: quickly convert headers copied from the browser into json
---------
@param text:
---------
@result:
"""
contents = text.split("\n")
json = {}
for content in contents:
if content == "\n":
continue
content = content.strip()
regex = ["(:?.*?):(.*)", "(.*?):? +(.*)", "([^:]*)"]
result = get_info(content, regex)
result = result[0] if isinstance(result[0], tuple) else result
try:
json[result[0]] = eval(result[1].strip())
except:
json[result[0]] = result[1].strip()
return json
##############################
def print_pretty(object):
pprint(object)
def print_params2json(url):
params_json = {}
params = url.split("?")[-1].split("&")
for param in params:
key_value = param.split("=", 1)
params_json[key_value[0]] = key_value[1]
print(dumps_json(params_json))
def print_cookie2json(cookie_str_or_list):
if isinstance(cookie_str_or_list, str):
cookie_json = {}
cookies = cookie_str_or_list.split("; ")
for cookie in cookies:
name, value = cookie.split("=")
cookie_json[name] = value
else:
cookie_json = get_cookies_from_selenium_cookie(cookie_str_or_list)
print(dumps_json(cookie_json))
###############################
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x):
"""iflatten(sequence) -> iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if _is_listlike(el):
for el_ in flatten(el):
yield el_
else:
yield el
def _is_listlike(x):
"""
>>> _is_listlike("foo")
False
>>> _is_listlike(5)
False
>>> _is_listlike(b"foo")
False
>>> _is_listlike([b"foo"])
True
>>> _is_listlike((b"foo",))
True
>>> _is_listlike({})
True
>>> _is_listlike(set())
True
>>> _is_listlike((x for x in range(3)))
True
>>> _is_listlike(six.moves.xrange(5))
True
"""
return hasattr(x, "__iter__") and not isinstance(x, (six.text_type, bytes))
###################
def re_def_supper_class(obj, supper_class):
"""
    Re-assign the parent class.
    @param obj: a class, e.g. for class A pass A, or an instance's a.__class__
    @param supper_class: the new parent class
@return:
"""
obj.__bases__ = (supper_class,)
###################
freq_limit_record = {}
def reach_freq_limit(rate_limit, *key):
"""
    Rate limiting.
    :param rate_limit: limit window in seconds
    :param key: key(s) the rate limit applies to
:return: True / False
"""
if rate_limit == 0:
return False
msg_md5 = get_md5(*key)
key = "rate_limit:{}".format(msg_md5)
try:
if get_redisdb().get(key):
return True
get_redisdb().set(key, time.time(), ex=rate_limit)
except redis.exceptions.ConnectionError as e:
        # fall back to an in-memory rate limit
global freq_limit_record
if key not in freq_limit_record:
freq_limit_record[key] = time.time()
return False
if time.time() - freq_limit_record.get(key) < rate_limit:
return True
else:
freq_limit_record[key] = time.time()
return False
def dingding_warning(
message, message_prefix=None, rate_limit=None, url=None, user_phone=None
):
    # re-read settings so the latest config is used
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.DINGDING_WARNING_URL
user_phone = user_phone or setting.DINGDING_WARNING_PHONE
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
data = {
"msgtype": "text",
"text": {"content": message},
"at": {"atMobiles": user_phone, "isAtAll": setting.DINGDING_WARNING_ALL},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def email_warning(
message,
title,
message_prefix=None,
email_sender=None,
email_password=None,
email_receiver=None,
email_smtpserver=None,
rate_limit=None,
):
    # re-read settings so the latest config is used
email_sender = email_sender or setting.EMAIL_SENDER
email_password = email_password or setting.EMAIL_PASSWORD
email_receiver = email_receiver or setting.EMAIL_RECEIVER
email_smtpserver = email_smtpserver or setting.EMAIL_SMTPSERVER
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
if not all([message, email_sender, email_password, email_receiver]):
return
if reach_freq_limit(
rate_limit, email_receiver, email_sender, message_prefix or message
):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
if isinstance(email_receiver, str):
email_receiver = [email_receiver]
with EmailSender(
username=email_sender, password=email_password, smtpserver=email_smtpserver
) as email:
return email.send(receivers=email_receiver, title=title, content=message)
def linkedsee_warning(message, rate_limit=3600, message_prefix=None, token=None):
"""
    Linkedsee (灵犀) phone-call alert
Args:
message:
rate_limit:
message_prefix:
token:
Returns:
"""
if not token:
log.info("未设置灵犀token,不支持报警")
return
if reach_freq_limit(rate_limit, token, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
headers = {"servicetoken": token, "Content-Type": "application/json"}
url = "http://www.linkedsee.com/alarm/zabbix"
data = {"content": message}
response = requests.post(url, data=json.dumps(data), headers=headers)
return response
def wechat_warning(
message,
message_prefix=None,
rate_limit=None,
url=None,
user_phone=None,
all_users: bool = None,
):
"""企业微信报警"""
# 为了加载最新的配置
rate_limit = rate_limit if rate_limit is not None else setting.WARNING_INTERVAL
url = url or setting.WECHAT_WARNING_URL
user_phone = user_phone or setting.WECHAT_WARNING_PHONE
all_users = all_users if all_users is not None else setting.WECHAT_WARNING_ALL
if isinstance(user_phone, str):
user_phone = [user_phone] if user_phone else []
if all_users is True or not user_phone:
user_phone = ["@all"]
if not all([url, message]):
return
if reach_freq_limit(rate_limit, url, user_phone, message_prefix or message):
log.info("报警时间间隔过短,此次报警忽略。 内容 {}".format(message))
return
data = {
"msgtype": "text",
"text": {"content": message, "mentioned_mobile_list": user_phone},
}
headers = {"Content-Type": "application/json"}
try:
response = requests.post(
url, headers=headers, data=json.dumps(data).encode("utf8")
)
result = response.json()
response.close()
if result.get("errcode") == 0:
return True
else:
raise Exception(result.get("errmsg"))
except Exception as e:
log.error("报警发送失败。 报警内容 {}, error: {}".format(message, e))
return False
def send_msg(msg, level="debug", message_prefix=""):
if setting.WARNING_LEVEL == "ERROR":
if level != "error":
return
if setting.DINGDING_WARNING_URL:
keyword = "feapder报警系统\n"
dingding_warning(keyword + msg, message_prefix=message_prefix)
if setting.EMAIL_RECEIVER:
title = message_prefix or msg
if len(title) > 50:
title = title[:50] + "..."
email_warning(msg, message_prefix=message_prefix, title=title)
if setting.WECHAT_WARNING_URL:
keyword = "feapder报警系统\n"
wechat_warning(keyword + msg, message_prefix=message_prefix)
###################
def make_item(cls, data: dict):
"""提供Item类与原数据,快速构建Item实例
:param cls: Item类
:param data: 字典格式的数据
"""
item = cls()
for key, val in data.items():
setattr(item, key, val)
return item
###################
def aio_wrap(loop=None, executor=None):
"""
wrap a normal sync version of a function to an async version
"""
outer_loop = loop
outer_executor = executor
def wrap(fn):
@wraps(fn)
async def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
if outer_loop is None:
loop = asyncio.get_event_loop()
else:
loop = outer_loop
if executor is None:
executor = outer_executor
pfunc = partial(fn, *args, **kwargs)
return await loop.run_in_executor(executor, pfunc)
return run
return wrap
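# Illustrative usage of aio_wrap (a minimal sketch; blocking_fetch is a made-up stand-in for any sync function):
#   @aio_wrap()
#   def blocking_fetch(url):
#       return requests.get(url).text
#
#   async def main():
#       html = await blocking_fetch("https://example.com")  # runs in the default executor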
######### number ##########
def ensure_int(n):
"""
>>> ensure_int(None)
0
>>> ensure_int(False)
0
>>> ensure_int(12)
12
>>> ensure_int("72")
72
>>> ensure_int('')
0
>>> ensure_int('1')
1
"""
if not n:
return 0
return int(n)
def ensure_float(n):
"""
>>> ensure_float(None)
0.0
>>> ensure_float(False)
0.0
>>> ensure_float(12)
12.0
>>> ensure_float("72")
72.0
"""
if not n:
return 0.0
return float(n)
|
[] |
[] |
[
"EXECJS_RUNTIME"
] |
[]
|
["EXECJS_RUNTIME"]
|
python
| 1 | 0 | |
releaser/git_test.go
|
// Copyright 2017-present The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package releaser
import (
"os"
"testing"
"github.com/stretchr/testify/require"
)
func TestGitInfos(t *testing.T) {
skipIfCI(t)
infos, err := getGitInfos("v0.20", false)
require.NoError(t, err)
require.True(t, len(infos) > 0)
}
func TestIssuesRe(t *testing.T) {
body := `
This is a commit message.
Updates #123
Fix #345
closes #543
See #456
`
issues := extractIssues(body)
require.Len(t, issues, 4)
require.Equal(t, 123, issues[0])
require.Equal(t, 543, issues[2])
}
func TestGitVersionTagBefore(t *testing.T) {
skipIfCI(t)
v1, err := gitVersionTagBefore("v0.18")
require.NoError(t, err)
require.Equal(t, "v0.17", v1)
}
func TestTagExists(t *testing.T) {
skipIfCI(t)
b1, err := tagExists("v0.18")
require.NoError(t, err)
require.True(t, b1)
b2, err := tagExists("adfagdsfg")
require.NoError(t, err)
require.False(t, b2)
}
func skipIfCI(t *testing.T) {
if os.Getenv("CI") != "" {
// Travis has an ancient git with no --invert-grep: https://github.com/travis-ci/travis-ci/issues/6328
// Also Travis clones very shallowly, making some of the tests above shaky.
t.Skip("Skip git test on Linux to make Travis happy.")
}
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
testsuite/manualmode/src/test/java/org/jboss/as/test/manualmode/management/persistence/RemoteSshGitRepositoryTestCase.java
|
/*
* Copyright 2019 JBoss by Red Hat.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.as.test.manualmode.management.persistence;
import static java.nio.charset.StandardCharsets.US_ASCII;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.KeyPair;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.GeneralSecurityException;
import java.security.KeyPairGenerator;
import java.security.Provider;
import java.security.SecureRandom;
import java.security.Security;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.sshd.common.config.keys.KeyUtils;
import org.apache.sshd.common.config.keys.PublicKeyEntry;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.junit.ssh.SshTestGitServer;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.util.FileUtils;
import org.jboss.as.repository.PathUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.wildfly.common.annotation.NotNull;
import org.wildfly.core.testrunner.ServerControl;
import org.wildfly.core.testrunner.UnsuccessfulOperationException;
import org.wildfly.core.testrunner.WildflyTestRunner;
import org.wildfly.security.auth.server.IdentityCredentials;
import org.wildfly.security.credential.Credential;
import org.wildfly.security.credential.KeyPairCredential;
import org.wildfly.security.credential.PasswordCredential;
import org.wildfly.security.credential.store.CredentialStore;
import org.wildfly.security.credential.store.WildFlyElytronCredentialStoreProvider;
import org.wildfly.security.credential.store.impl.KeyStoreCredentialStore;
import org.wildfly.security.password.interfaces.ClearPassword;
/**
* @author <a href="mailto:[email protected]">Ashley Abdel-Sayed</a>
*/
@RunWith(WildflyTestRunner.class)
@ServerControl(manual = true)
public class RemoteSshGitRepositoryTestCase extends AbstractGitRepositoryTestCase {
private static Path backupRoot;
private static Path remoteRoot;
private static Repository remoteRepository;
private static SSHServer sshServer;
protected static int port;
private static String SSH_DIR = Paths.get("src","test", "resources", "git-persistence", ".ssh").toAbsolutePath().toString();
private String AUTH_FILE = Paths.get("src","test", "resources", "git-persistence", "wildfly-config.xml").toUri().toString();
private String RSA_USER = "testRSA";
private String RSA_PUBKEY = "id_rsa.pub";
private static String EC_USER = "testEC";
private static String EC_PUBKEY = "id_ecdsa.pub";
private String PKCS_USER = "testPKCS";
private String PKCS_PUBKEY = "id_ecdsa_pkcs.pub";
private String CS_REF_USER = "testCSRef";
private String UNKNOWN_HOSTS_USER = "testUnknownHost";
private static String CS_REF_PUBKEY = "id_rsa_cred_store.pub";
private static File KNOWN_HOSTS;
private static Path CS_PUBKEY;
private static final Provider CREDENTIAL_STORE_PROVIDER = new WildFlyElytronCredentialStoreProvider();
private static final char[] CREDENTIAL_STORE_PASSWORD = "Elytron".toCharArray();
private static Map<String, String> stores = new HashMap<>();
private static String BASE_STORE_DIRECTORY = "target/ks-cred-stores";
static {
stores.put("ONE", BASE_STORE_DIRECTORY + "/openssh-keys-test.jceks");
}
static final class Data {
private String alias;
private Credential credential;
private CredentialStore.ProtectionParameter protectionParameter;
Data(final String alias, final Credential credential, final CredentialStore.ProtectionParameter protectionParameter) {
this.alias = alias;
this.credential = credential;
this.protectionParameter = protectionParameter;
}
String getAlias() {
return alias;
}
Credential getCredential() {
return credential;
}
CredentialStore.ProtectionParameter getProtectionParameter() {
return protectionParameter;
}
}
@BeforeClass
public static void setUp() throws Exception {
backupConfiguration();
Security.insertProviderAt(CREDENTIAL_STORE_PROVIDER, 1);
cleanCredentialStores();
String file = stores.get("ONE");
String type = "JCEKS";
ArrayList<Data> data = new ArrayList<>();
KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
keyPairGenerator.initialize(3072, new SecureRandom());
KeyPairCredential keyPairCredential = new KeyPairCredential(keyPairGenerator.generateKeyPair());
CS_PUBKEY = new File(SSH_DIR, CS_REF_PUBKEY).toPath();
Files.write(CS_PUBKEY, Collections.singleton(PublicKeyEntry.toString(keyPairCredential.getKeyPair().getPublic())));
data.add(new Data("RSAKey", keyPairCredential, null));
if (file == null) {
throw new IllegalStateException("file has to be specified");
}
KeyStoreCredentialStore storeImpl = new KeyStoreCredentialStore();
final Map<String, String> map = new HashMap<>();
map.put("location", file);
map.put("create", Boolean.TRUE.toString());
if (type != null) map.put("keyStoreType", type);
storeImpl.initialize(
map,
new CredentialStore.CredentialSourceProtectionParameter(
IdentityCredentials.NONE.withCredential(new PasswordCredential(ClearPassword.createRaw(ClearPassword.ALGORITHM_CLEAR, CREDENTIAL_STORE_PASSWORD)))),
null
);
for (Data item : data) {
storeImpl.store(item.getAlias(), item.getCredential(), item.getProtectionParameter());
}
storeImpl.flush();
}
static void backupConfiguration() throws IOException {
Path backUpRoot = Files.createTempDirectory("BackUpConfigurationFiles").resolve("configuration");
Files.createDirectories(backUpRoot);
PathUtil.copyRecursively(getJbossServerBaseDir().resolve("configuration"), backUpRoot, true);
RemoteSshGitRepositoryTestCase.backupRoot = backUpRoot;
}
@AfterClass
public static void afterClass() throws IOException {
Security.removeProvider(CREDENTIAL_STORE_PROVIDER.getName());
FileUtils.delete(CS_PUBKEY.toFile(), FileUtils.RECURSIVE | FileUtils.RETRY);
PathUtil.deleteRecursively(backupRoot);
}
@Before
public void prepareTest() throws Exception {
remoteRoot = new File("target", "remote").toPath();
Path repoConfigDir = remoteRoot.resolve("configuration");
Files.createDirectories(repoConfigDir);
File baseDir = remoteRoot.toAbsolutePath().toFile();
Path jbossConfigDir = new File(System.getProperty("jboss.home", System.getenv("JBOSS_HOME"))).toPath().resolve("standalone").resolve("configuration");
PathUtil.copyRecursively(jbossConfigDir, repoConfigDir, true);
Path properties = repoConfigDir.resolve("logging.properties");
if(Files.exists(properties)) {
Files.delete(properties);
}
File gitDir = new File(baseDir, Constants.DOT_GIT);
if (!gitDir.exists()) {
try (Git git = Git.init().setDirectory(baseDir).call()) {
git.add().addFilepattern("configuration").call();
git.commit().setSign(false).setMessage("Repository initialized").call();
}
}
remoteRepository = new FileRepositoryBuilder().setWorkTree(baseDir).setGitDir(gitDir).setup().build();
//Generate new key pair for the server
ByteArrayOutputStream publicHostKey = new ByteArrayOutputStream();
JSch jsch = new JSch();
KeyPair hostKeyPair = KeyPair.genKeyPair(jsch, 2, 2048);
ByteArrayOutputStream hostPrivateKey = new ByteArrayOutputStream();
hostKeyPair.writePrivateKey(hostPrivateKey);
hostPrivateKey.flush();
hostKeyPair.writePublicKey(publicHostKey, "");
sshServer = new SSHServer(EC_USER, Paths.get(SSH_DIR +'/' + EC_PUBKEY),
remoteRepository, hostPrivateKey.toByteArray()); //create key pair gen
port = sshServer.start();
//Add new server to known_hosts
KNOWN_HOSTS = new File(SSH_DIR, "known_hosts");
        FileWriter fileWriter = new FileWriter(KNOWN_HOSTS, true);
        String knownHostTemplate = "[%s]:" + port + ' ' + publicHostKey.toString(US_ASCII.name()) + "\n";
        try (BufferedWriter bw = new BufferedWriter(fileWriter)) {
bw.write(String.format(knownHostTemplate, "127.0.0.1"));
bw.write(String.format(knownHostTemplate, "localhost"));
bw.write(String.format(knownHostTemplate, InetAddress.getLocalHost().getHostName()));
if (System.getenv().containsKey("COMPUTERNAME")) {
bw.write(String.format(knownHostTemplate, System.getenv().get("COMPUTERNAME")));
}
}
}
@After
public void tearDown() throws Exception {
if (container.isStarted()) {
try {
removeDeployment();
} catch (Exception sde) {
// ignore error undeploying, might not exist
}
removeSystemProperty();
container.stop();
}
if (sshServer != null) {
sshServer.stop();
sshServer = null;
}
FileUtils.delete(KNOWN_HOSTS, FileUtils.RECURSIVE | FileUtils.RETRY);
closeRepository();
closeEmptyRemoteRepository();
closeRemoteRepository();
restoreConfiguration();
}
void restoreConfiguration() throws IOException {
Path configuration = getJbossServerBaseDir().resolve("configuration");
PathUtil.deleteRecursively(configuration);
Files.createDirectories(configuration);
PathUtil.copyRecursively(backupRoot, getJbossServerBaseDir().resolve("configuration"), true);
}
@Test
public void startGitRepoRemoteSSHAuthTest() throws Exception {
//add user to server
sshServer.setTestUser(EC_USER);
sshServer.setTestUserPublicKey(Paths.get(SSH_DIR +'/' + EC_PUBKEY));
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + EC_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
List<String> commits = listCommits(remoteRepository);
Assert.assertEquals(1, commits.size());
addSystemProperty();
publish(null);
commits = listCommits(remoteRepository);
Assert.assertEquals(3, commits.size());
// create branch in remote repo and change master for next test
try (Git git = new Git(remoteRepository)) {
git.checkout().setName("my_branch").setCreateBranch(true).call();
}
removeSystemProperty();
publish(null);
container.stop();
closeRepository();
// start with remote repository and branch containing configuration
// (--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=my_branch
// --git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + EC_USER + "@127.0.0.1:" + port + "/doesntmatter",
"my_branch", AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
try {
//my_branch was created before the system property was removed and so attempting to add the system property
//should fail as it already exists
addSystemProperty();
Assert.fail("Operation should have failed");
} catch (UnsuccessfulOperationException uoe) {
Assert.assertTrue(uoe.getMessage().contains("WFLYCTL0212"));
}
}
@Test
public void startGitRepoRemoteSSHPKCSAuthTest() throws Exception {
//add user to server
sshServer.setTestUser(PKCS_USER);
sshServer.setTestUserPublicKey(Paths.get(SSH_DIR +'/' + PKCS_PUBKEY));
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + PKCS_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
List<String> commits = listCommits(remoteRepository);
Assert.assertEquals(1, commits.size());
addSystemProperty();
publish(null);
commits = listCommits(remoteRepository);
Assert.assertEquals(3, commits.size());
// create branch in remote repo and change master for next test
try (Git git = new Git(remoteRepository)) {
git.checkout().setName("my_branch").setCreateBranch(true).call();
}
removeSystemProperty();
publish(null);
container.stop();
closeRepository();
// start with remote repository and branch containing configuration
// (--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=my_branch
// --git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + PKCS_USER + "@127.0.0.1:" + port + "/doesntmatter",
"my_branch", AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
try {
//my_branch was created before the system property was removed and so attempting to add the system property
//should fail as it already exists
addSystemProperty();
Assert.fail("Operation should have failed");
} catch (UnsuccessfulOperationException uoe) {
Assert.assertTrue(uoe.getMessage().contains("WFLYCTL0212"));
}
}
@Test
public void startGitRepoRemoteSshAuthRSATest() throws Exception {
//add user to server
sshServer.setTestUser(RSA_USER);
sshServer.setTestUserPublicKey(Paths.get(SSH_DIR +'/' + RSA_PUBKEY));
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + RSA_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
List<String> commits = listCommits(remoteRepository);
Assert.assertEquals(1, commits.size());
addSystemProperty();
publish(null);
commits = listCommits(remoteRepository);
Assert.assertEquals(3, commits.size());
// create branch in remote repo and change master for next test
try (Git git = new Git(remoteRepository)) {
git.checkout().setName("my_branch").setCreateBranch(true).call();
}
removeSystemProperty();
publish(null);
container.stop();
closeRepository();
// start with remote repository and branch containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=my_branch
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + RSA_USER + "@127.0.0.1:" + port + "/doesntmatter",
"my_branch", AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
try {
//my_branch was created before the system property was removed and so attempting to add the system property
//should fail as it already exists
addSystemProperty();
Assert.fail("Operation should have failed");
} catch (UnsuccessfulOperationException uoe) {
Assert.assertTrue(uoe.getMessage().contains("WFLYCTL0212"));
}
}
@Test
public void startGitRepoRemoteSSHCredStoreRefTest() throws Exception {
//add user to server
sshServer.setTestUser(CS_REF_USER);
sshServer.setTestUserPublicKey(CS_PUBKEY);
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + CS_REF_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
List<String> commits = listCommits(remoteRepository);
Assert.assertEquals(1, commits.size());
addSystemProperty();
publish(null);
commits = listCommits(remoteRepository);
Assert.assertEquals(3, commits.size());
// create branch in remote repo and change master for next test
try (Git git = new Git(remoteRepository)) {
git.checkout().setName("my_branch").setCreateBranch(true).call();
}
removeSystemProperty();
publish(null);
container.stop();
closeRepository();
// start with remote repository and branch containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=my_branch
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + CS_REF_USER + "@127.0.0.1:" + port + "/doesntmatter",
"my_branch", AUTH_FILE);
Assert.assertTrue("Directory not found " + getDotGitDir(), Files.exists(getDotGitDir()));
Assert.assertTrue("File not found " + getDotGitIgnore(), Files.exists(getDotGitIgnore()));
try {
//my_branch was created before the system property was removed and so attempting to add the system property
//should fail as it already exists
addSystemProperty();
Assert.fail("Operation should have failed");
} catch (UnsuccessfulOperationException uoe) {
Assert.assertTrue(uoe.getMessage().contains("WFLYCTL0212"));
}
}
@Test
public void startGitRepoRemoteSSHFailedAuthTest() throws Exception {
//add user to server
sshServer.setTestUser(EC_USER);
sshServer.setTestUserPublicKey(Paths.get(SSH_DIR +'/' + RSA_PUBKEY)); //incorrect public key
try {
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
//Trying to access EC_USER, should not be authorized
container.startGitBackedConfiguration("ssh://" + EC_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.fail("Should have failed authentication");
} catch (RuntimeException ex) {
            // expected - authentication with the wrong public key must be rejected
}
}
@Test
public void startGitRepoRemoteUnknownHostTest() throws Exception {
//Create new empty known hosts file
Path emptyHosts = new File(SSH_DIR, "empty_hosts").toPath();
Files.write(emptyHosts, Collections.singleton("[localhost]:"));
//add user to server
sshServer.setTestUser(UNKNOWN_HOSTS_USER);
sshServer.setTestUserPublicKey(Paths.get(SSH_DIR +'/' + RSA_PUBKEY));
try {
// start with remote repository containing configuration
//(--git-repo=ssh://[email protected]:testPort/doesntmatter --git-branch=master
//--git-auth=file:./src/test/resources/git-persistence/ssh-auth/wildfly-config.xml)
container.startGitBackedConfiguration("ssh://" + UNKNOWN_HOSTS_USER + "@127.0.0.1:" + port + "/doesntmatter",
Constants.MASTER, AUTH_FILE);
Assert.fail("Should have failed to authenticate host");
} catch (RuntimeException ex) {
Path serverLog = Paths.get(getJbossServerBaseDir().resolve("log").toString(), "server.log");
assertLogContains(serverLog, "The authenticity of host", true);
assertLogContains(serverLog, "cannot be established", true);
}
//Delete empty known_hosts file
FileUtils.delete(emptyHosts.toFile(), FileUtils.RECURSIVE | FileUtils.RETRY);
}
private static class SSHServer extends SshTestGitServer {
@NotNull
protected String testUser;
@NotNull
protected Repository repository;
public SSHServer(String testUser, Path testKey, Repository repository, byte[] hostKey) throws IOException, GeneralSecurityException {
super(testUser, testKey, repository, hostKey);
}
public void setTestUser(String testUser) {
this.testUser = testUser;
}
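        // Note (sketch of intent, inferred from the tests above): the overridden
        // authenticator below checks the mutable testUser field and the server's
        // testKey, so each test can point the running server at a different
        // authorized user (via setTestUser/setTestUserPublicKey) without restarting it.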
@Override
protected void configureAuthentication() {
super.configureAuthentication();
this.server.setPublickeyAuthenticator((userName, publicKey, session) -> {
return this.testUser.equals(userName) && KeyUtils.compareKeys(this.testKey, publicKey);
});
}
}
private void assertLogContains(final Path logFile, final String msg, final boolean expected) throws Exception {
try (final BufferedReader reader = Files.newBufferedReader(logFile, StandardCharsets.UTF_8)) {
String line;
boolean logFound = false;
while ((line = reader.readLine()) != null) {
if (line.contains(msg)) {
logFound = true;
break;
}
}
Assert.assertTrue(logFound == expected);
}
}
private void closeRemoteRepository() throws Exception {
if (remoteRepository != null) {
remoteRepository.close();
}
FileUtils.delete(remoteRoot.toFile(), FileUtils.RECURSIVE | FileUtils.RETRY);
}
private static void cleanCredentialStores() {
File dir = new File(BASE_STORE_DIRECTORY);
dir.mkdirs();
for (String f: stores.values()) {
File file = new File(f);
file.delete();
}
}
}
| ["\"JBOSS_HOME\""] | [] | ["JBOSS_HOME"] | [] | ["JBOSS_HOME"] | java | 1 | 0 | |
instana/recorder.py
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2016
# Accept, process and queue spans for eventual reporting.
from __future__ import absolute_import
import os
import sys
from basictracer import Sampler
from .span import RegisteredSpan, SDKSpan
if sys.version_info.major == 2:
import Queue as queue
else:
import queue
class StanRecorder(object):
THREAD_NAME = "Instana Span Reporting"
REGISTERED_SPANS = ("aiohttp-client", "aiohttp-server", "aws.lambda.entry",
"boto3", "cassandra", "celery-client", "celery-worker",
"couchbase", "django", "gcs", "gcps-producer",
"gcps-consumer", "log", "memcache", "mongo", "mysql",
"postgres", "pymongo", "rabbitmq", "redis","render",
"rpc-client", "rpc-server", "sqlalchemy", "soap",
"tornado-client", "tornado-server", "urllib3", "wsgi")
# Recorder thread for collection/reporting of spans
thread = None
def __init__(self, agent = None):
if agent is None:
# Late import to avoid circular import
# pylint: disable=import-outside-toplevel
from .singletons import get_agent
self.agent = get_agent()
else:
self.agent = agent
def queue_size(self):
""" Return the size of the queue; how may spans are queued, """
return self.agent.collector.span_queue.qsize()
def queued_spans(self):
""" Get all of the spans in the queue """
span = None
spans = []
import time
from .singletons import env_is_test
if env_is_test is True:
time.sleep(1)
if self.agent.collector.span_queue.empty() is True:
return spans
while True:
try:
span = self.agent.collector.span_queue.get(False)
except queue.Empty:
break
else:
spans.append(span)
return spans
def clear_spans(self):
""" Clear the queue of spans """
        if not self.agent.collector.span_queue.empty():
self.queued_spans()
def record_span(self, span):
"""
        Convert the passed BasicSpan into a JSON span and add it to the span queue.
"""
if self.agent.can_send():
service_name = None
source = self.agent.get_from_structure()
if "INSTANA_SERVICE_NAME" in os.environ:
service_name = self.agent.options.service_name
if span.operation_name in self.REGISTERED_SPANS:
json_span = RegisteredSpan(span, source, service_name)
else:
service_name = self.agent.options.service_name
json_span = SDKSpan(span, source, service_name)
# logger.debug("Recorded span: %s", json_span)
self.agent.collector.span_queue.put(json_span)
class InstanaSampler(Sampler):
def sampled(self, _):
# We never sample
return False
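# Rough usage sketch (assumed wiring - in practice the tracer and agent set this
# up automatically via the instana singletons):
#
#   recorder = StanRecorder()        # resolves the agent singleton
#   ...spans finished by the tracer are passed to recorder.record_span()...
#   spans = recorder.queued_spans()  # drain the queue, e.g. in tests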
| [] | [] | [] | [] | [] | python | 0 | 0 | |
swift_test.go
|
// This tests the swift package
//
// It can be used with a real swift server, which should be configured
// via the environment variables SWIFT_API_USER, SWIFT_API_KEY and
// SWIFT_AUTH_URL.
// In case those variables are not defined, a fake Swift server
// is used instead - see Testing in README.md for more info
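//
// For example (illustrative values only - substitute your own credentials
// and auth endpoint; a v1 auth URL is assumed here):
//
//	export SWIFT_API_USER='test:tester'
//	export SWIFT_API_KEY='testing'
//	export SWIFT_AUTH_URL='http://127.0.0.1:8080/auth/v1.0'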
//
// The functions are designed to run in order and create things the
// next function tests. This means that if it goes wrong it is likely
// errors will propagate. You may need to tidy up the CONTAINER to
// get it to run cleanly.
package swift_test
import (
"archive/tar"
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"crypto/tls"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/ncw/swift/v2"
"github.com/ncw/swift/v2/swifttest"
)
var (
srv *swifttest.SwiftServer
m1 = swift.Metadata{"Hello": "1", "potato-Salad": "2"}
m2 = swift.Metadata{"hello": "", "potato-salad": ""}
skipVersionTests = false
)
const (
CONTAINER = "GoSwiftUnitTest"
SEGMENTS_CONTAINER = "GoSwiftUnitTest_segments"
VERSIONS_CONTAINER = "GoSwiftUnitTestVersions"
CURRENT_CONTAINER = "GoSwiftUnitTestCurrent"
OBJECT = "test_object"
OBJECT2 = "test_object2"
SYMLINK_OBJECT = "test_symlink"
SYMLINK_OBJECT2 = "test_symlink2"
EMPTYOBJECT = "empty_test_object"
CONTENTS = "12345"
CONTENTS2 = "54321"
CONTENT_SIZE = int64(len(CONTENTS))
CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b"
CONTENT2_MD5 = "01cfcd4f6b8770febfb40cb906715822"
EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
SECRET_KEY = "b3968d0207b54ece87cccc06515a89d4"
)
type someTransport struct{ http.Transport }
func makeConnection(t *testing.T) (*swift.Connection, func()) {
var err error
UserName := os.Getenv("SWIFT_API_USER")
ApiKey := os.Getenv("SWIFT_API_KEY")
AuthUrl := os.Getenv("SWIFT_AUTH_URL")
Region := os.Getenv("SWIFT_REGION_NAME")
EndpointType := os.Getenv("SWIFT_ENDPOINT_TYPE")
Insecure := os.Getenv("SWIFT_AUTH_INSECURE")
ConnectionChannelTimeout := os.Getenv("SWIFT_CONNECTION_CHANNEL_TIMEOUT")
DataChannelTimeout := os.Getenv("SWIFT_DATA_CHANNEL_TIMEOUT")
internalServer := false
if UserName == "" || ApiKey == "" || AuthUrl == "" {
srv, err = swifttest.NewSwiftServer("localhost")
if err != nil && t != nil {
t.Fatal("Failed to create server", err)
}
UserName = "swifttest"
ApiKey = "swifttest"
AuthUrl = srv.AuthURL
internalServer = true
}
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: 2048,
}
if Insecure == "1" {
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
swift.SetExpectContinueTimeout(transport, 5*time.Second)
c := swift.Connection{
UserName: UserName,
ApiKey: ApiKey,
AuthUrl: AuthUrl,
Region: Region,
Transport: transport,
ConnectTimeout: 60 * time.Second,
Timeout: 60 * time.Second,
EndpointType: swift.EndpointType(EndpointType),
}
if !internalServer {
if isV3Api() {
c.Tenant = os.Getenv("SWIFT_TENANT")
c.Domain = os.Getenv("SWIFT_API_DOMAIN")
} else {
c.Tenant = os.Getenv("SWIFT_TENANT")
c.TenantId = os.Getenv("SWIFT_TENANT_ID")
}
}
var timeout int64
if ConnectionChannelTimeout != "" {
timeout, err = strconv.ParseInt(ConnectionChannelTimeout, 10, 32)
if err == nil {
c.ConnectTimeout = time.Duration(timeout) * time.Second
}
}
if DataChannelTimeout != "" {
timeout, err = strconv.ParseInt(DataChannelTimeout, 10, 32)
if err == nil {
c.Timeout = time.Duration(timeout) * time.Second
}
}
return &c, func() {
if srv != nil {
srv.Close()
}
}
}
func makeConnectionAuth(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnection(t)
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
return c, rollback
}
func makeConnectionWithContainer(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
err := c.ContainerCreate(ctx, CONTAINER, m1.ContainerHeaders())
if err != nil {
t.Fatal(err)
}
return c, func() {
_ = c.ContainerDelete(ctx, CONTAINER)
rollback()
}
}
func makeConnectionWithObject(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
err := c.ObjectPutString(ctx, CONTAINER, OBJECT, CONTENTS, "")
if err != nil {
t.Fatal(err)
}
return c, func() {
_ = c.ObjectDelete(ctx, CONTAINER, OBJECT)
rollback()
}
}
func makeConnectionWithObjectHeaders(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
err := c.ObjectUpdate(ctx, CONTAINER, OBJECT, m1.ObjectHeaders())
if err != nil {
t.Fatal(err)
}
return c, rollback
}
func makeConnectionWithVersionsContainer(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
err := c.VersionContainerCreate(ctx, CURRENT_CONTAINER, VERSIONS_CONTAINER)
newRollback := func() {
_ = c.ContainerDelete(ctx, CURRENT_CONTAINER)
_ = c.ContainerDelete(ctx, VERSIONS_CONTAINER)
rollback()
}
if err != nil {
if err == swift.Forbidden {
skipVersionTests = true
return c, newRollback
}
t.Fatal(err)
}
return c, newRollback
}
func makeConnectionWithVersionsObject(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithVersionsContainer(t)
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS, ""); err != nil {
t.Fatal(err)
}
// Version 2
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil {
t.Fatal(err)
}
// Version 3
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil {
t.Fatal(err)
}
return c, func() {
for i := 0; i < 3; i++ {
_ = c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT)
}
rollback()
}
}
func makeConnectionWithSegmentsContainer(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
err := c.ContainerCreate(ctx, SEGMENTS_CONTAINER, swift.Headers{})
if err != nil {
t.Fatal(err)
}
return c, func() {
err = c.ContainerDelete(ctx, SEGMENTS_CONTAINER)
if err != nil {
t.Fatal(err)
}
rollback()
}
}
func makeConnectionWithDLO(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(out, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
return c, func() {
_ = c.DynamicLargeObjectDelete(ctx, CONTAINER, OBJECT)
rollback()
}
}
func makeConnectionWithSLO(t *testing.T) (*swift.Connection, func()) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
if err == swift.SLONotSupported {
t.Skip("SLO not supported")
return c, rollback
}
t.Fatal(err)
}
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(out, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
return c, func() {
_ = c.StaticLargeObjectDelete(ctx, CONTAINER, OBJECT)
rollback()
}
}
func isV3Api() bool {
AuthUrl := os.Getenv("SWIFT_AUTH_URL")
return strings.Contains(AuthUrl, "v3")
}
func getSwinftInfo(t *testing.T) (info swift.SwiftInfo, err error) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
return c.QueryInfo(ctx)
}
func TestTransport(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnection(t)
defer rollback()
tr := &someTransport{
Transport: http.Transport{
MaxIdleConnsPerHost: 2048,
},
}
Insecure := os.Getenv("SWIFT_AUTH_INSECURE")
if Insecure == "1" {
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
c.Transport = tr
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
// The following Test functions are run in order - this one must come before the others!
func TestV1V2Authenticate(t *testing.T) {
ctx := context.Background()
if isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
func TestV3AuthenticateWithDomainNameAndTenantId(t *testing.T) {
ctx := context.Background()
if !isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
c.Tenant = ""
c.Domain = os.Getenv("SWIFT_API_DOMAIN")
c.TenantId = os.Getenv("SWIFT_TENANT_ID")
c.DomainId = ""
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
func TestV3TrustWithTrustId(t *testing.T) {
ctx := context.Background()
if !isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
c.TrustId = os.Getenv("SWIFT_TRUST_ID")
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
func TestV3AuthenticateWithDomainIdAndTenantId(t *testing.T) {
ctx := context.Background()
if !isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
c.Tenant = ""
c.Domain = ""
c.TenantId = os.Getenv("SWIFT_TENANT_ID")
c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID")
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
func TestV3AuthenticateWithDomainNameAndTenantName(t *testing.T) {
ctx := context.Background()
if !isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
c.Tenant = os.Getenv("SWIFT_TENANT")
c.Domain = os.Getenv("SWIFT_API_DOMAIN")
c.TenantId = ""
c.DomainId = ""
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
func TestV3AuthenticateWithDomainIdAndTenantName(t *testing.T) {
ctx := context.Background()
if !isV3Api() {
return
}
c, rollback := makeConnection(t)
defer rollback()
c.Tenant = os.Getenv("SWIFT_TENANT")
c.Domain = ""
c.TenantId = ""
c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID")
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("Auth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
// Attempt to trigger a race in authenticate
//
// Run with -race to test
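// e.g. (command assumed; adjust the run pattern as needed):
//
//	go test -race -run TestAuthenticateRace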
func TestAuthenticateRace(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnection(t)
defer rollback()
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
err := c.Authenticate(ctx)
if err != nil {
t.Error("Auth failed", err)
}
if !c.Authenticated() {
t.Error("Not authenticated")
}
}()
}
wg.Wait()
}
// Test a connection can be serialized and unserialized with JSON
func TestSerializeConnectionJson(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
serializedConnection, err := json.Marshal(c)
if err != nil {
t.Fatalf("Failed to serialize connection: %v", err)
}
c2 := new(swift.Connection)
err = json.Unmarshal(serializedConnection, &c2)
if err != nil {
t.Fatalf("Failed to unserialize connection: %v", err)
}
if !c2.Authenticated() {
t.Fatal("Should be authenticated")
}
_, _, err = c2.Account(ctx)
if err != nil {
t.Fatalf("Failed to use unserialized connection: %v", err)
}
}
// Test a connection can be serialized and unserialized with XML
func TestSerializeConnectionXml(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
serializedConnection, err := xml.Marshal(c)
if err != nil {
t.Fatalf("Failed to serialize connection: %v", err)
}
c2 := new(swift.Connection)
err = xml.Unmarshal(serializedConnection, &c2)
if err != nil {
t.Fatalf("Failed to unserialize connection: %v", err)
}
if !c2.Authenticated() {
t.Fatal("Should be authenticated")
}
_, _, err = c2.Account(ctx)
if err != nil {
t.Fatalf("Failed to use unserialized connection: %v", err)
}
}
// Test the reauthentication logic
func TestOnReAuth(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
c.UnAuthenticate()
_, _, err := c.Account(ctx)
if err != nil {
t.Fatalf("Failed to reauthenticate: %v", err)
}
}
func TestAccount(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
info, headers, err := c.Account(ctx)
if err != nil {
t.Fatal(err)
}
if headers["X-Account-Container-Count"] != fmt.Sprintf("%d", info.Containers) {
t.Error("Bad container count")
}
if headers["X-Account-Bytes-Used"] != fmt.Sprintf("%d", info.BytesUsed) {
t.Error("Bad bytes count")
}
if headers["X-Account-Object-Count"] != fmt.Sprintf("%d", info.Objects) {
t.Error("Bad objects count")
}
}
func compareMaps(t *testing.T, a, b map[string]string) {
if len(a) != len(b) {
t.Error("Maps different sizes", a, b)
}
for ka, va := range a {
if vb, ok := b[ka]; !ok || va != vb {
t.Error("Difference in key", ka, va, b[ka])
}
}
for kb, vb := range b {
if va, ok := a[kb]; !ok || vb != va {
t.Error("Difference in key", kb, vb, a[kb])
}
}
}
func TestAccountUpdate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
err := c.AccountUpdate(ctx, m1.AccountHeaders())
if err != nil {
t.Fatal(err)
}
_, headers, err := c.Account(ctx)
if err != nil {
t.Fatal(err)
}
m := headers.AccountMetadata()
delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set
compareMaps(t, m, map[string]string{"hello": "1", "potato-salad": "2"})
err = c.AccountUpdate(ctx, m2.AccountHeaders())
if err != nil {
t.Fatal(err)
}
_, headers, err = c.Account(ctx)
if err != nil {
t.Fatal(err)
}
m = headers.AccountMetadata()
delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set
compareMaps(t, m, map[string]string{})
}
func TestContainerCreate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
err := c.ContainerCreate(ctx, CONTAINER, m1.ContainerHeaders())
if err != nil {
t.Fatal(err)
}
err = c.ContainerDelete(ctx, CONTAINER)
if err != nil {
t.Fatal(err)
}
}
func TestContainer(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
info, headers, err := c.Container(ctx, CONTAINER)
if err != nil {
t.Fatal(err)
}
compareMaps(t, headers.ContainerMetadata(), map[string]string{"hello": "1", "potato-salad": "2"})
if CONTAINER != info.Name {
t.Error("Bad container count")
}
if headers["X-Container-Bytes-Used"] != fmt.Sprintf("%d", info.Bytes) {
t.Error("Bad bytes count")
}
if headers["X-Container-Object-Count"] != fmt.Sprintf("%d", info.Count) {
t.Error("Bad objects count")
}
}
func TestContainersAll(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
containers1, err := c.ContainersAll(ctx, nil)
if err != nil {
t.Fatal(err)
}
containers2, err := c.Containers(ctx, nil)
if err != nil {
t.Fatal(err)
}
if len(containers1) != len(containers2) {
t.Fatal("Wrong length")
}
for i := range containers1 {
if containers1[i] != containers2[i] {
t.Fatal("Not the same")
}
}
}
func TestContainersAllWithLimit(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
containers1, err := c.ContainersAll(ctx, &swift.ContainersOpts{Limit: 1})
if err != nil {
t.Fatal(err)
}
containers2, err := c.Containers(ctx, nil)
if err != nil {
t.Fatal(err)
}
if len(containers1) != len(containers2) {
t.Fatal("Wrong length")
}
for i := range containers1 {
if containers1[i] != containers2[i] {
t.Fatal("Not the same")
}
}
}
func TestContainerUpdate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ContainerUpdate(ctx, CONTAINER, m2.ContainerHeaders())
if err != nil {
t.Fatal(err)
}
_, headers, err := c.Container(ctx, CONTAINER)
if err != nil {
t.Fatal(err)
}
compareMaps(t, headers.ContainerMetadata(), map[string]string{})
}
func TestContainerNames(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
containers, err := c.ContainerNames(ctx, nil)
if err != nil {
t.Fatal(err)
}
ok := false
for _, container := range containers {
if container == CONTAINER {
ok = true
break
}
}
if !ok {
t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers)
}
}
func TestContainerNamesAll(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
containers1, err := c.ContainerNamesAll(ctx, nil)
if err != nil {
t.Fatal(err)
}
containers2, err := c.ContainerNames(ctx, nil)
if err != nil {
t.Fatal(err)
}
if len(containers1) != len(containers2) {
t.Fatal("Wrong length")
}
for i := range containers1 {
if containers1[i] != containers2[i] {
t.Fatal("Not the same")
}
}
}
func TestContainerNamesAllWithLimit(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
containers1, err := c.ContainerNamesAll(ctx, &swift.ContainersOpts{Limit: 1})
if err != nil {
t.Fatal(err)
}
containers2, err := c.ContainerNames(ctx, nil)
if err != nil {
t.Fatal(err)
}
if len(containers1) != len(containers2) {
t.Fatal("Wrong length")
}
for i := range containers1 {
if containers1[i] != containers2[i] {
t.Fatal("Not the same")
}
}
}
func TestObjectPutString(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ObjectPutString(ctx, CONTAINER, OBJECT, CONTENTS, "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "application/octet-stream" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != CONTENT_SIZE {
t.Error("Bad length")
}
if info.Hash != CONTENT_MD5 {
t.Error("Bad length")
}
}
func TestObjectPut(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
headers := swift.Headers{}
// Set content size incorrectly - should produce an error
headers["Content-Length"] = strconv.FormatInt(CONTENT_SIZE-1, 10)
contents := bytes.NewBufferString(CONTENTS)
_, err := c.ObjectPut(ctx, CONTAINER, OBJECT, contents, true, CONTENT_MD5, "text/plain", headers)
if err == nil {
t.Fatal("Expecting error but didn't get one")
}
// Now set content size correctly
contents = bytes.NewBufferString(CONTENTS)
headers["Content-Length"] = strconv.FormatInt(CONTENT_SIZE, 10)
h, err := c.ObjectPut(ctx, CONTAINER, OBJECT, contents, true, CONTENT_MD5, "text/plain", headers)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
if h["Etag"] != CONTENT_MD5 {
t.Errorf("Bad Etag want %q got %q", CONTENT_MD5, h["Etag"])
}
// Fetch object info and compare
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "text/plain" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != CONTENT_SIZE {
t.Error("Bad length")
}
if info.Hash != CONTENT_MD5 {
t.Error("Bad length")
}
}
func TestObjectPutWithReauth(t *testing.T) {
ctx := context.Background()
if !swift.IS_AT_LEAST_GO_16 {
return
}
c, rollback := makeConnectionWithContainer(t)
defer rollback()
// Simulate that our auth token expired
c.AuthToken = "expiredtoken"
r := strings.NewReader(CONTENTS)
_, err := c.ObjectPut(ctx, CONTAINER, OBJECT, r, true, "", "text/plain", nil)
if err != nil {
t.Fatal(err)
}
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "text/plain" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != CONTENT_SIZE {
t.Error("Bad length")
}
if info.Hash != CONTENT_MD5 {
t.Error("Bad length")
}
}
func TestObjectPutStringWithReauth(t *testing.T) {
ctx := context.Background()
if !swift.IS_AT_LEAST_GO_16 {
return
}
c, rollback := makeConnectionWithContainer(t)
defer rollback()
// Simulate that our auth token expired
c.AuthToken = "expiredtoken"
err := c.ObjectPutString(ctx, CONTAINER, OBJECT, CONTENTS, "")
if err != nil {
t.Fatal(err)
}
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "application/octet-stream" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != CONTENT_SIZE {
t.Error("Bad length")
}
if info.Hash != CONTENT_MD5 {
t.Error("Bad length")
}
}
func TestObjectEmpty(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ObjectPutString(ctx, CONTAINER, EMPTYOBJECT, "", "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, EMPTYOBJECT)
if err != nil {
t.Error(err)
}
}()
info, _, err := c.Object(ctx, CONTAINER, EMPTYOBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "application/octet-stream" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != 0 {
t.Errorf("Bad length want 0 got %v", info.Bytes)
}
if info.Hash != EMPTY_MD5 {
t.Errorf("Bad MD5 want %v got %v", EMPTY_MD5, info.Hash)
}
}
func TestSymlinkObject(t *testing.T) {
ctx := context.Background()
info, err := getSwinftInfo(t)
if err != nil {
t.Fatal(err)
}
if _, ok := info["symlink"]; !ok {
// skip, symlink not supported
t.Skip("skip, symlink not supported")
return
}
c, rollback := makeConnectionWithContainer(t)
defer rollback()
// write target objects
err = c.ObjectPutBytes(ctx, CONTAINER, OBJECT, []byte(CONTENTS), "text/potato")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
}()
// test dynamic link
_, err = c.ObjectSymlinkCreate(ctx, CONTAINER, SYMLINK_OBJECT, "", CONTAINER, OBJECT, "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, SYMLINK_OBJECT)
if err != nil {
t.Error(err)
}
}()
md, _, err := c.Object(ctx, CONTAINER, SYMLINK_OBJECT)
if err != nil {
t.Error(err)
}
if md.ContentType != "text/potato" {
t.Error("Bad content type", md.ContentType)
}
if md.Bytes != CONTENT_SIZE {
t.Errorf("Bad length want 5 got %v", md.Bytes)
}
if md.Hash != CONTENT_MD5 {
t.Errorf("Bad MD5 want %v got %v", CONTENT_MD5, md.Hash)
}
}
func TestStaticSymlinkObject(t *testing.T) {
ctx := context.Background()
info, err := getSwinftInfo(t)
if err != nil {
t.Fatal(err)
}
if sym, ok := info["symlink"].(map[string]interface{}); ok {
if _, ok := sym["static_links"]; !ok {
t.Skip("skip, static symlink not supported")
return
}
} else {
t.Skip("skip, symlink not supported")
return
}
c, rollback := makeConnectionWithContainer(t)
defer rollback()
// write target objects
err = c.ObjectPutBytes(ctx, CONTAINER, OBJECT2, []byte(CONTENTS2), "text/tomato")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Error(err)
}
}()
// test static link
// first with the wrong target etag
_, err = c.ObjectSymlinkCreate(ctx, CONTAINER, SYMLINK_OBJECT2, "", CONTAINER, OBJECT2, CONTENT_MD5)
if err == nil {
t.Error("Symlink with wrong target etag should have failed")
}
_, err = c.ObjectSymlinkCreate(ctx, CONTAINER, SYMLINK_OBJECT2, "", CONTAINER, OBJECT2, CONTENT2_MD5)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, SYMLINK_OBJECT2)
if err != nil {
t.Error(err)
}
}()
md, _, err := c.Object(ctx, CONTAINER, SYMLINK_OBJECT2)
if err != nil {
t.Error(err)
}
if md.ContentType != "text/tomato" {
t.Error("Bad content type", md.ContentType)
}
if md.Bytes != CONTENT_SIZE {
t.Errorf("Bad length want 5 got %v", md.Bytes)
}
if md.Hash != CONTENT2_MD5 {
t.Errorf("Bad MD5 want %v got %v", CONTENT2_MD5, md.Hash)
}
}
func TestObjectPutBytes(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ObjectPutBytes(ctx, CONTAINER, OBJECT, []byte(CONTENTS), "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
}()
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if info.ContentType != "application/octet-stream" {
t.Error("Bad content type", info.ContentType)
}
if info.Bytes != CONTENT_SIZE {
t.Error("Bad length")
}
if info.Hash != CONTENT_MD5 {
t.Error("Bad length")
}
}
func TestObjectPutMimeType(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ObjectPutString(ctx, CONTAINER, "test.jpg", CONTENTS, "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, "test.jpg")
if err != nil {
t.Error(err)
}
}()
info, _, err := c.Object(ctx, CONTAINER, "test.jpg")
if err != nil {
t.Error(err)
}
if info.ContentType != "image/jpeg" {
t.Error("Bad content type", info.ContentType)
}
}
func TestObjectCreate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
out, err := c.ObjectCreate(ctx, CONTAINER, OBJECT2, true, "", "", nil)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Error(err)
}
}()
buf := &bytes.Buffer{}
hash := md5.New()
out2 := io.MultiWriter(out, buf, hash)
for i := 0; i < 100; i++ {
_, _ = fmt.Fprintf(out2, "%d %s\n", i, CONTENTS)
}
// Ensure Headers fails if called prematurely
_, err = out.Headers()
if err == nil {
t.Error("Headers should fail if called before Close()")
}
err = out.Close()
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
// Ensure Headers succeeds when called after a good upload
headers, err := out.Headers()
if err != nil {
t.Error(err)
}
if len(headers) < 1 {
t.Error("The Headers returned by Headers() should not be empty")
}
// Test writing on closed file
n, err := out.Write([]byte{0})
if err == nil || n != 0 {
t.Error("Expecting error and n == 0 writing on closed file", err, n)
}
// Now with hash instead
out, err = c.ObjectCreate(ctx, CONTAINER, OBJECT2, false, fmt.Sprintf("%x", hash.Sum(nil)), "", nil)
if err != nil {
t.Fatal(err)
}
_, err = out.Write(buf.Bytes())
if err != nil {
t.Error(err)
}
err = out.Close()
if err != nil {
t.Error(err)
}
contents, err = c.ObjectGetString(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
// Now with bad hash
out, err = c.ObjectCreate(ctx, CONTAINER, OBJECT2, false, CONTENT_MD5, "", nil)
if err != nil {
t.Fatal(err)
}
// FIXME: work around bug which produces 503 not 422 for empty corrupted files
_, _ = fmt.Fprintf(out, "Sausage")
err = out.Close()
if err != swift.ObjectCorrupted {
t.Error("Expecting object corrupted not", err)
}
}
func TestObjectCreateAbort(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
out, err := c.ObjectCreate(ctx, CONTAINER, OBJECT2, true, "", "", nil)
if err != nil {
t.Fatal(err)
}
defer func() {
_ = c.ObjectDelete(ctx, CONTAINER, OBJECT2) // Ignore error
}()
expectedContents := "foo"
_, err = out.Write([]byte(expectedContents))
if err != nil {
t.Error(err)
}
errAbort := fmt.Errorf("abort")
err = out.CloseWithError(errAbort)
if err != nil {
t.Errorf("Unexpected error %#v", err)
}
_, err = c.ObjectGetString(ctx, CONTAINER, OBJECT2)
if err != swift.ObjectNotFound {
t.Errorf("Unexpected error: %#v", err)
}
}
func TestObjectGetString(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if contents != CONTENTS {
t.Error("Contents wrong")
}
}
func TestObjectGetBytes(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
contents, err := c.ObjectGetBytes(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if string(contents) != CONTENTS {
t.Error("Contents wrong")
}
}
func TestObjectOpen(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
file, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, nil)
if err != nil {
t.Fatal(err)
}
var buf bytes.Buffer
n, err := io.Copy(&buf, file)
if err != nil {
t.Fatal(err)
}
if n != CONTENT_SIZE {
t.Fatal("Wrong length", n, CONTENT_SIZE)
}
if buf.String() != CONTENTS {
t.Error("Contents wrong")
}
err = file.Close()
if err != nil {
t.Fatal(err)
}
}
func TestObjectOpenPartial(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
file, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, nil)
if err != nil {
t.Fatal(err)
}
var buf bytes.Buffer
n, err := io.CopyN(&buf, file, 1)
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Fatal("Wrong length", n, CONTENT_SIZE)
}
if buf.String() != CONTENTS[:1] {
t.Error("Contents wrong")
}
err = file.Close()
if err != nil {
t.Fatal(err)
}
}
func TestObjectOpenLength(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
file, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, nil)
if err != nil {
t.Fatal(err)
}
// FIXME ideally this would check both branches of the Length() code
n, err := file.Length(ctx)
if err != nil {
t.Fatal(err)
}
if n != CONTENT_SIZE {
t.Fatal("Wrong length", n, CONTENT_SIZE)
}
err = file.Close()
if err != nil {
t.Fatal(err)
}
}
func TestObjectOpenNotModified(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
_, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, swift.Headers{
"If-None-Match": CONTENT_MD5,
})
if err != swift.NotModified {
t.Fatal(err)
}
}
func TestObjectOpenSeek(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
plan := []struct {
whence int
offset int64
result int64
}{
{-1, 0, 0},
{-1, 0, 1},
{-1, 0, 2},
{0, 0, 0},
{0, 0, 0},
{0, 1, 1},
{0, 2, 2},
{1, 0, 3},
{1, -2, 2},
{1, 1, 4},
{2, -1, 4},
{2, -3, 2},
{2, -2, 3},
{2, -5, 0},
{2, -4, 1},
}
file, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, nil)
if err != nil {
t.Fatal(err)
}
for _, p := range plan {
if p.whence >= 0 {
var result int64
result, err = file.Seek(ctx, p.offset, p.whence)
if err != nil {
t.Fatal(err, p)
}
if result != p.result {
t.Fatal("Seek result was", result, "expecting", p.result, p)
}
}
var buf bytes.Buffer
var n int64
n, err = io.CopyN(&buf, file, 1)
if err != nil {
t.Fatal(err, p)
}
if n != 1 {
t.Fatal("Wrong length", n, p)
}
actual := buf.String()
expected := CONTENTS[p.result : p.result+1]
if actual != expected {
t.Error("Contents wrong, expecting", expected, "got", actual, p)
}
}
err = file.Close()
if err != nil {
t.Fatal(err)
}
}
// Test seeking to the end to find the file size
func TestObjectOpenSeekEnd(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
file, _, err := c.ObjectOpen(ctx, CONTAINER, OBJECT, true, nil)
if err != nil {
t.Fatal(err)
}
n, err := file.Seek(ctx, 0, 2) // seek to end
if err != nil {
t.Fatal(err)
}
if n != CONTENT_SIZE {
t.Fatal("Wrong offset", n)
}
// Now check reading returns EOF
buf := make([]byte, 16)
nn, err := io.ReadFull(file, buf)
if err != io.EOF {
t.Fatal(err)
}
if nn != 0 {
t.Fatal("wrong length", n)
}
// Now seek back to start and check we can read the file
n, err = file.Seek(ctx, 0, 0) // seek to start
if err != nil {
t.Fatal(err)
}
if n != 0 {
t.Fatal("Wrong offset", n)
}
// read file and check contents
buf, err = ioutil.ReadAll(file)
if err != nil {
t.Fatal(err)
}
if string(buf) != CONTENTS {
t.Fatal("wrong contents", string(buf))
}
}
func TestObjectUpdate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
err := c.ObjectUpdate(ctx, CONTAINER, OBJECT, m1.ObjectHeaders())
if err != nil {
t.Fatal(err)
}
}
func checkTime(t *testing.T, when time.Time, low, high int) {
dt := time.Now().Sub(when)
if dt < time.Duration(low)*time.Second || dt > time.Duration(high)*time.Second {
t.Errorf("Time is wrong: dt=%q, when=%q", dt, when)
}
}
func TestObject(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
object, headers, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"})
if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" {
t.Error("Bad object info", object)
}
checkTime(t, object.LastModified, -10, 10)
}
func TestObjectUpdate2(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
err := c.ObjectUpdate(ctx, CONTAINER, OBJECT, m2.ObjectHeaders())
if err != nil {
t.Fatal(err)
}
_, headers, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""})
}
func TestContainers(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
containers, err := c.Containers(ctx, nil)
if err != nil {
t.Fatal(err)
}
ok := false
for _, container := range containers {
if container.Name == CONTAINER {
ok = true
// Container may or may not have the file contents in it
// Swift updates may be behind
if container.Count == 0 && container.Bytes == 0 {
break
}
if container.Count == 1 && container.Bytes == CONTENT_SIZE {
break
}
t.Errorf("Bad size of Container %q: %q", CONTAINER, container)
break
}
}
if !ok {
t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers)
}
}
func TestObjectNames(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectNames(ctx, CONTAINER, nil)
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0] != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjectNamesAll(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectNamesAll(ctx, CONTAINER, nil)
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0] != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjectNamesAllWithLimit(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectNamesAll(ctx, CONTAINER, &swift.ObjectsOpts{Limit: 1})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0] != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjectsWalk(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects := make([]string, 0)
	err := c.ObjectsWalk(ctx, CONTAINER, nil, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
newObjects, err := c.ObjectNames(ctx, CONTAINER, opts)
if err == nil {
objects = append(objects, newObjects...)
}
return newObjects, err
})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0] != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjects(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.Objects(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/'})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 {
t.Fatal("Should only be 1 object")
}
object := objects[0]
if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" {
t.Error("Bad object info", object)
}
checkTime(t, object.LastModified, -10, 10)
}
func TestObjectsDirectory(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
err := c.ObjectPutString(ctx, CONTAINER, "directory", "", "application/directory")
if err != nil {
t.Fatal(err)
}
defer func() {
_ = c.ObjectDelete(ctx, CONTAINER, "directory")
}()
// Look for the directory object and check we aren't confusing
// it with a pseudo directory object
objects, err := c.Objects(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/'})
if err != nil {
t.Fatal(err)
}
if len(objects) != 2 {
t.Fatal("Should only be 2 objects")
}
found := false
for i := range objects {
object := objects[i]
if object.Name == "directory" {
found = true
if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "d41d8cd98f00b204e9800998ecf8427e" || object.PseudoDirectory != false || object.SubDir != "" {
t.Error("Bad object info", object)
}
checkTime(t, object.LastModified, -10, 10)
}
}
if !found {
t.Error("Didn't find directory object")
}
}
func TestObjectsPseudoDirectory(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
err := c.ObjectPutString(ctx, CONTAINER, "directory/puppy.jpg", "cute puppy", "")
if err != nil {
t.Fatal(err)
}
defer func() {
_ = c.ObjectDelete(ctx, CONTAINER, "directory/puppy.jpg")
}()
// Look for the pseudo directory
objects, err := c.Objects(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/'})
if err != nil {
t.Fatal(err)
}
if len(objects) != 2 {
t.Fatal("Should only be 2 objects", objects)
}
found := false
for i := range objects {
object := objects[i]
if object.Name == "directory/" {
found = true
if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "" || object.PseudoDirectory != true || object.SubDir != "directory/" && object.LastModified.IsZero() {
t.Error("Bad object info", object)
}
}
}
if !found {
t.Error("Didn't find directory object", objects)
}
// Look in the pseudo directory now
objects, err = c.Objects(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "directory/"})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 {
t.Fatal("Should only be 1 object", objects)
}
object := objects[0]
if object.Name != "directory/puppy.jpg" || object.Bytes != 10 || object.ContentType != "image/jpeg" || object.Hash != "87a12ea22fca7f54f0cefef1da535489" || object.PseudoDirectory != false || object.SubDir != "" {
t.Error("Bad object info", object)
}
checkTime(t, object.LastModified, -10, 10)
}
func TestObjectsAll(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectsAll(ctx, CONTAINER, nil)
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0].Name != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjectsAllWithLimit(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectsAll(ctx, CONTAINER, &swift.ObjectsOpts{Limit: 1})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0].Name != OBJECT {
t.Error("Incorrect listing", objects)
}
}
func TestObjectNamesWithPath(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
objects, err := c.ObjectNames(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: ""})
if err != nil {
t.Fatal(err)
}
if len(objects) != 1 || objects[0] != OBJECT {
t.Error("Bad listing with path", objects)
}
// fmt.Println(objects)
objects, err = c.ObjectNames(ctx, CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "Downloads/"})
if err != nil {
t.Fatal(err)
}
if len(objects) != 0 {
t.Error("Bad listing with path", objects)
}
}
func TestObjectCopy(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
_, err := c.ObjectCopy(ctx, CONTAINER, OBJECT, CONTAINER, OBJECT2, nil)
if err != nil {
t.Fatal(err)
}
err = c.ObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
}
func TestObjectCopyDifficultName(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
const dest = OBJECT + "?param %30%31%32 £100"
_, err := c.ObjectCopy(ctx, CONTAINER, OBJECT, CONTAINER, dest, nil)
if err != nil {
t.Fatal(err)
}
err = c.ObjectDelete(ctx, CONTAINER, dest)
if err != nil {
t.Fatal(err)
}
}
func TestObjectCopyWithMetadata(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
m := swift.Metadata{}
m["copy-special-metadata"] = "hello"
m["hello"] = "9"
h := m.ObjectHeaders()
h["Content-Type"] = "image/jpeg"
_, err := c.ObjectCopy(ctx, CONTAINER, OBJECT, CONTAINER, OBJECT2, h)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
}()
// Re-read the metadata to see if it is correct
_, headers, err := c.Object(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
if headers["Content-Type"] != "image/jpeg" {
t.Error("Didn't change content type")
}
compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "9", "potato-salad": "2", "copy-special-metadata": "hello"})
}
func TestObjectMove(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
err := c.ObjectMove(ctx, CONTAINER, OBJECT, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
testExistenceAfterDelete(t, c, CONTAINER, OBJECT)
_, _, err = c.Object(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
err = c.ObjectMove(ctx, CONTAINER, OBJECT2, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
testExistenceAfterDelete(t, c, CONTAINER, OBJECT2)
_, headers, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"})
}
func TestObjectUpdateContentType(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObjectHeaders(t)
defer rollback()
err := c.ObjectUpdateContentType(ctx, CONTAINER, OBJECT, "text/potato")
if err != nil {
t.Fatal(err)
}
// Re-read the metadata to see if it is correct
_, headers, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if headers["Content-Type"] != "text/potato" {
t.Error("Didn't change content type")
}
compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"})
}
func TestVersionContainerCreate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
err := c.VersionContainerCreate(ctx, CURRENT_CONTAINER, VERSIONS_CONTAINER)
defer func() {
_ = c.ContainerDelete(ctx, CURRENT_CONTAINER)
_ = c.ContainerDelete(ctx, VERSIONS_CONTAINER)
}()
if err != nil {
if err == swift.Forbidden {
t.Log("Server doesn't support Versions - skipping test")
return
}
t.Fatal(err)
}
}
func TestVersionObjectAdd(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithVersionsContainer(t)
defer rollback()
if skipVersionTests {
t.Log("Server doesn't support Versions - skipping test")
return
}
// Version 1
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS, ""); err != nil {
t.Fatal(err)
}
defer func() {
err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
if contents, err := c.ObjectGetString(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
} else if contents != CONTENTS {
t.Error("Contents wrong")
}
// Version 2
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil {
t.Fatal(err)
}
defer func() {
err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
if contents, err := c.ObjectGetString(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
} else if contents != CONTENTS2 {
t.Error("Contents wrong")
}
// Version 3
if err := c.ObjectPutString(ctx, CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil {
t.Fatal(err)
}
defer func() {
err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
}
func TestVersionObjectList(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithVersionsObject(t)
defer rollback()
if skipVersionTests {
t.Log("Server doesn't support Versions - skipping test")
return
}
list, err := c.VersionObjectList(ctx, VERSIONS_CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if len(list) != 2 {
t.Error("Version list should return 2 objects")
}
}
func TestVersionObjectDelete(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithVersionsObject(t)
defer rollback()
if skipVersionTests {
t.Log("Server doesn't support Versions - skipping test")
return
}
// Delete Version 3
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
}
// Delete Version 2
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
}
// Contents should be reverted to Version 1
if contents, err := c.ObjectGetString(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
} else if contents != CONTENTS {
t.Error("Contents wrong")
}
}
func TestVersionDeleteContent(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithVersionsObject(t)
defer rollback()
if skipVersionTests {
t.Log("Server doesn't support Versions - skipping test")
return
}
// Delete Version 3
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
}
// Delete Version 2
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
}
// Delete Version 1
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != nil {
t.Fatal(err)
}
if err := c.ObjectDelete(ctx, CURRENT_CONTAINER, OBJECT); err != swift.ObjectNotFound {
t.Fatalf("Expecting Object not found error, got: %v", err)
}
}
// Check for non existence after delete
// May have to do it a few times to wait for swift to be consistent.
func testExistenceAfterDelete(t *testing.T, c *swift.Connection, container, object string) {
ctx := context.Background()
	for i := 10; i >= 0; i-- {
_, _, err := c.Object(ctx, container, object)
if err == swift.ObjectNotFound {
break
}
if i == 0 {
t.Fatalf("Expecting object %q/%q not found not: err=%v", container, object, err)
}
time.Sleep(1 * time.Second)
}
}
func TestObjectDelete(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithObject(t)
defer rollback()
err := c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
testExistenceAfterDelete(t, c, CONTAINER, OBJECT)
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != swift.ObjectNotFound {
t.Fatal("Expecting Object not found", err)
}
}
func TestBulkDelete(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
result, err := c.BulkDelete(ctx, CONTAINER, []string{OBJECT})
if err == swift.Forbidden {
t.Log("Server doesn't support BulkDelete - skipping test")
return
}
if err != nil {
t.Fatal(err)
}
if result.NumberNotFound != 1 {
t.Error("Expected 1, actual:", result.NumberNotFound)
}
if result.NumberDeleted != 0 {
t.Error("Expected 0, actual:", result.NumberDeleted)
}
err = c.ObjectPutString(ctx, CONTAINER, OBJECT, CONTENTS, "")
if err != nil {
t.Fatal(err)
}
result, err = c.BulkDelete(ctx, CONTAINER, []string{OBJECT2, OBJECT})
if err != nil {
t.Fatal(err)
}
if result.NumberNotFound != 1 {
t.Error("Expected 1, actual:", result.NumberNotFound)
}
if result.NumberDeleted != 1 {
t.Error("Expected 1, actual:", result.NumberDeleted)
}
t.Log("Errors:", result.Errors)
}
func TestBulkUpload(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
buffer := new(bytes.Buffer)
ds := tar.NewWriter(buffer)
var files = []struct{ Name, Body string }{
{OBJECT, CONTENTS},
{OBJECT2, CONTENTS2},
}
for _, file := range files {
hdr := &tar.Header{
Name: file.Name,
Size: int64(len(file.Body)),
}
if err := ds.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err := ds.Write([]byte(file.Body)); err != nil {
t.Fatal(err)
}
}
if err := ds.Close(); err != nil {
t.Fatal(err)
}
result, err := c.BulkUpload(ctx, CONTAINER, buffer, swift.UploadTar, nil)
if err == swift.Forbidden {
t.Log("Server doesn't support BulkUpload - skipping test")
return
}
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
err = c.ObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
}()
if result.NumberCreated != 2 {
t.Error("Expected 2, actual:", result.NumberCreated)
}
t.Log("Errors:", result.Errors)
_, _, err = c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error("Expecting object to be found")
}
_, _, err = c.Object(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Error("Expecting object to be found")
}
}
func TestObjectDifficultName(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
const name = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/`
err := c.ObjectPutString(ctx, CONTAINER, name, CONTENTS, "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, name)
if err != nil {
t.Fatal(err)
}
}()
objects, err := c.ObjectNamesAll(ctx, CONTAINER, nil)
if err != nil {
t.Error(err)
}
found := false
for _, object := range objects {
if object == name {
found = true
break
}
}
if !found {
t.Errorf("Couldn't find %q in listing %q", name, objects)
}
}
func TestTempUrl(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ObjectPutBytes(ctx, CONTAINER, OBJECT, []byte(CONTENTS), "")
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
m := swift.Metadata{}
m["temp-url-key"] = SECRET_KEY
err = c.AccountUpdate(ctx, m.AccountHeaders())
if err != nil {
t.Fatal(err)
}
expiresTime := time.Now().Add(20 * time.Minute)
tempUrl := c.ObjectTempUrl(CONTAINER, OBJECT, SECRET_KEY, "GET", expiresTime)
resp, err := http.Get(tempUrl)
if err != nil {
t.Fatal("Failed to retrieve file from temporary url")
}
defer func() {
err := resp.Body.Close()
if err != nil {
t.Error("Close failed", err)
}
}()
if resp.StatusCode == 401 {
t.Log("Server doesn't support tempurl")
} else if resp.StatusCode != 200 {
t.Fatal("HTTP Error retrieving file from temporary url", resp.StatusCode)
} else {
var content []byte
if content, err = ioutil.ReadAll(resp.Body); err != nil || string(content) != CONTENTS {
t.Error("Bad content", err)
}
resp, err = http.Post(tempUrl, "image/jpeg", bytes.NewReader([]byte(CONTENTS)))
if err != nil {
t.Fatal("Failed to retrieve file from temporary url")
}
defer func() {
err := resp.Body.Close()
if err != nil {
t.Error("Close failed", err)
}
}()
if resp.StatusCode != 401 {
t.Fatal("Expecting server to forbid access to object")
}
}
}
func TestQueryInfo(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
infos, err := c.QueryInfo(ctx)
if err != nil {
t.Log("Server doesn't support querying info")
return
}
if _, ok := infos["swift"]; !ok {
t.Fatal("No 'swift' section found in configuration")
}
}
func TestDLOCreate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.DynamicLargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if info.ObjectType != swift.DynamicLargeObjectType {
t.Errorf("Wrong ObjectType, expected %d, got: %d", swift.DynamicLargeObjectType, info.ObjectType)
}
if info.Bytes != int64(len(expected)) {
t.Errorf("Wrong Bytes size, expected %d, got: %d", len(expected), info.Bytes)
}
}
func TestDLOInsert(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
CheckHash: true,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
_, err = fmt.Fprintf(multi, "%d%s\n", 0, CONTENTS)
if err != nil {
t.Fatal(err)
}
_, _ = fmt.Fprintf(buf, "\n%d %s\n", 1, CONTENTS)
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOAppend(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
Flags: os.O_APPEND,
CheckHash: true,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
buf := bytes.NewBuffer([]byte(contents))
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i+10, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err = c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOTruncate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
Flags: os.O_TRUNC,
CheckHash: true,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
_, err = fmt.Fprintf(multi, "%s", CONTENTS)
if err != nil {
t.Fatal(err)
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOMove(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
err = c.DynamicLargeObjectMove(ctx, CONTAINER, OBJECT, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.DynamicLargeObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
}()
contents2, err := c.ObjectGetString(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
if contents2 != contents {
t.Error("Contents wrong")
}
}
func TestDLONoSegmentContainer(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithDLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
SegmentContainer: CONTAINER,
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOCreateMissingSegmentsInList(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
if srv == nil {
t.Skipf("This test only runs with the fake swift server as it's needed to simulate eventual consistency problems.")
return
}
listURL := "/v1/AUTH_" + swifttest.TEST_ACCOUNT + "/" + SEGMENTS_CONTAINER
srv.SetOverride(listURL, func(w http.ResponseWriter, r *http.Request, recorder *httptest.ResponseRecorder) {
for k, v := range recorder.HeaderMap {
w.Header().Set(k, v[0])
}
w.WriteHeader(recorder.Code)
_, _ = w.Write([]byte("null\n"))
})
defer srv.UnsetOverride(listURL)
headers := swift.Headers{}
err := c.ContainerCreate(ctx, SEGMENTS_CONTAINER, headers)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ContainerDelete(ctx, SEGMENTS_CONTAINER)
if err != nil {
t.Fatal(err)
}
}()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.DynamicLargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOCreateIncorrectSize(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
if srv == nil {
t.Skipf("This test only runs with the fake swift server as it's needed to simulate eventual consistency problems.")
return
}
listURL := "/v1/AUTH_" + swifttest.TEST_ACCOUNT + "/" + CONTAINER + "/" + OBJECT
headCount := 0
expectedHeadCount := 5
srv.SetOverride(listURL, func(w http.ResponseWriter, r *http.Request, recorder *httptest.ResponseRecorder) {
for k, v := range recorder.HeaderMap {
w.Header().Set(k, v[0])
}
if r.Method == "HEAD" {
headCount++
if headCount < expectedHeadCount {
w.Header().Set("Content-Length", "7")
}
}
w.WriteHeader(recorder.Code)
_, _ = w.Write(recorder.Body.Bytes())
})
defer srv.UnsetOverride(listURL)
headers := swift.Headers{}
err := c.ContainerCreate(ctx, SEGMENTS_CONTAINER, headers)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.ContainerDelete(ctx, SEGMENTS_CONTAINER)
if err != nil {
t.Fatal(err)
}
}()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.DynamicLargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
if headCount != expectedHeadCount {
t.Errorf("Unexpected HEAD requests count, expected %d, got: %d", expectedHeadCount, headCount)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestDLOConcurrentWrite(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
nConcurrency := 5
nChunks := 100
var chunkSize int64 = 1024
writeFn := func(i int) {
objName := fmt.Sprintf("%s_concurrent_dlo_%d", OBJECT, i)
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: objName,
ContentType: "image/jpeg",
}
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.DynamicLargeObjectDelete(ctx, CONTAINER, objName)
if err != nil {
t.Fatal(err)
}
}()
buf := &bytes.Buffer{}
for j := 0; j < nChunks; j++ {
var data []byte
var n int
data, err = ioutil.ReadAll(io.LimitReader(rand.Reader, chunkSize))
if err != nil {
t.Fatal(err)
}
multi := io.MultiWriter(buf, out)
n, err = multi.Write(data)
if err != nil {
t.Fatal(err)
}
if int64(n) != chunkSize {
t.Fatalf("expected to write %d, got: %d", chunkSize, n)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, objName)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Error("Contents wrong")
}
}
wg := sync.WaitGroup{}
for i := 0; i < nConcurrency; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
writeFn(i)
}(i)
}
wg.Wait()
}
func TestDLOSegmentation(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
ChunkSize: 6,
NoBuffer: true,
}
testSegmentation(t, c, func() swift.LargeObjectFile {
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
return out
}, []segmentTest{
{
writes: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8"},
expectedSegs: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8"},
expectedValue: "012345678",
},
{
writes: []string{"012345", "012345"},
expectedSegs: []string{"012345", "012345"},
expectedValue: "012345012345",
},
{
writes: []string{"0123456", "0123456"},
expectedSegs: []string{"012345", "6", "012345", "6"},
expectedValue: "01234560123456",
},
{
writes: []string{"0123456", "0123456"},
seeks: []int{-4, 0},
expectedSegs: []string{"012012", "3456"},
expectedValue: "0120123456",
},
{
writes: []string{"0123456", "0123456", "abcde"},
seeks: []int{0, -11, 0},
expectedSegs: []string{"012abc", "d", "e12345", "6"},
expectedValue: "012abcde123456",
},
{
writes: []string{"0123456", "ab"},
seeks: []int{-4, 0},
expectedSegs: []string{"012ab5", "6"},
expectedValue: "012ab56",
},
})
}
func TestDLOSegmentationBuffered(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
ChunkSize: 6,
}
testSegmentation(t, c, func() swift.LargeObjectFile {
out, err := c.DynamicLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
return out
}, []segmentTest{
{
writes: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8"},
expectedSegs: []string{"012345", "678"},
expectedValue: "012345678",
},
{
writes: []string{"012345", "012345"},
expectedSegs: []string{"012345", "012345"},
expectedValue: "012345012345",
},
{
writes: []string{"0123456", "0123456"},
expectedSegs: []string{"012345", "6", "012345", "6"},
expectedValue: "01234560123456",
},
{
writes: []string{"0123456", "0123456"},
seeks: []int{-4, 0},
expectedSegs: []string{"012012", "3456"},
expectedValue: "0120123456",
},
{
writes: []string{"0123456", "0123456", "abcde"},
seeks: []int{0, -11, 0},
expectedSegs: []string{"012abc", "d", "e12345", "6"},
expectedValue: "012abcde123456",
},
{
writes: []string{"0123456", "ab"},
seeks: []int{-4, 0},
expectedSegs: []string{"012ab5", "6"},
expectedValue: "012ab56",
},
})
}
func TestSLOCreate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
if err == swift.SLONotSupported {
t.Skip("SLO not supported")
return
}
t.Fatal(err)
}
defer func() {
err = c.StaticLargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
info, _, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if info.ObjectType != swift.StaticLargeObjectType {
t.Errorf("Wrong ObjectType, expected %d, got: %d", swift.StaticLargeObjectType, info.ObjectType)
}
if info.Bytes != int64(len(expected)) {
t.Errorf("Wrong Bytes size, expected %d, got: %d", len(expected), info.Bytes)
}
}
func TestSLOInsert(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
}
out, err := c.StaticLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
_, err = fmt.Fprintf(multi, "%d%s\n", 0, CONTENTS)
if err != nil {
t.Fatal(err)
}
_, _ = fmt.Fprintf(buf, "\n%d %s\n", 1, CONTENTS)
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestSLOAppend(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
Flags: os.O_APPEND,
CheckHash: true,
ContentType: "image/jpeg",
}
out, err := c.StaticLargeObjectCreateFile(ctx, &opts)
if err != nil {
t.Fatal(err)
}
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
buf := bytes.NewBuffer([]byte(contents))
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i+10, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err = c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
}
func TestSLOMove(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSLO(t)
defer rollback()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
err = c.StaticLargeObjectMove(ctx, CONTAINER, OBJECT, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
defer func() {
err = c.StaticLargeObjectDelete(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
}()
contents2, err := c.ObjectGetString(ctx, CONTAINER, OBJECT2)
if err != nil {
t.Fatal(err)
}
if contents2 != contents {
t.Error("Contents wrong")
}
}
func TestSLONoSegmentContainer(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSLO(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
SegmentContainer: CONTAINER,
}
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
multi := io.MultiWriter(buf, out)
for i := 0; i < 2; i++ {
_, err = fmt.Fprintf(multi, "%d %s\n", i, CONTENTS)
if err != nil {
t.Fatal(err)
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
expected := buf.String()
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != expected {
t.Errorf("Contents wrong, expected %q, got: %q", expected, contents)
}
err = c.StaticLargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}
func TestSLOMinChunkSize(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
if srv == nil {
t.Skipf("This test only runs with the fake swift server as it's needed to simulate min segment size.")
return
}
srv.SetOverride("/info", func(w http.ResponseWriter, r *http.Request, recorder *httptest.ResponseRecorder) {
_, _ = w.Write([]byte(`{"slo": {"min_segment_size": 4}}`))
})
defer srv.UnsetOverride("/info")
_, _ = c.QueryInfo(ctx)
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
ChunkSize: 6,
MinChunkSize: 0,
NoBuffer: true,
}
testSLOSegmentation(t, c, func() swift.LargeObjectFile {
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
t.Fatal(err)
}
return out
})
}
func TestSLOSegmentation(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
ChunkSize: 6,
MinChunkSize: 4,
NoBuffer: true,
}
testSLOSegmentation(t, c, func() swift.LargeObjectFile {
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
if err == swift.SLONotSupported {
t.Skip("SLO not supported")
}
t.Fatal(err)
}
return out
})
}
func TestSLOSegmentationBuffered(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithSegmentsContainer(t)
defer rollback()
opts := swift.LargeObjectOpts{
Container: CONTAINER,
ObjectName: OBJECT,
ContentType: "image/jpeg",
ChunkSize: 6,
MinChunkSize: 4,
}
testSegmentation(t, c, func() swift.LargeObjectFile {
out, err := c.StaticLargeObjectCreate(ctx, &opts)
if err != nil {
if err == swift.SLONotSupported {
t.Skip("SLO not supported")
}
t.Fatal(err)
}
return out
}, []segmentTest{
{
writes: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8"},
expectedSegs: []string{"012345", "678"},
expectedValue: "012345678",
},
{
writes: []string{"012345", "012345"},
expectedSegs: []string{"012345", "012345"},
expectedValue: "012345012345",
},
{
writes: []string{"0123456", "0123456"},
expectedSegs: []string{"012345", "601234", "56"},
expectedValue: "01234560123456",
},
{
writes: []string{"0123456", "0123456"},
seeks: []int{-4, 0},
expectedSegs: []string{"012012", "3456"},
expectedValue: "0120123456",
},
{
writes: []string{"0123456", "0123456", "abcde"},
seeks: []int{0, -11, 0},
expectedSegs: []string{"012abc", "de1234", "56"},
expectedValue: "012abcde123456",
},
{
writes: []string{"0123456", "ab"},
seeks: []int{-4, 0},
expectedSegs: []string{"012ab5", "6"},
expectedValue: "012ab56",
},
})
}
func testSLOSegmentation(t *testing.T, c *swift.Connection, createObj func() swift.LargeObjectFile) {
testCases := []segmentTest{
{
writes: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8"},
expectedSegs: []string{"0123", "4567", "8"},
expectedValue: "012345678",
},
{
writes: []string{"012345", "012345"},
expectedSegs: []string{"012345", "012345"},
expectedValue: "012345012345",
},
{
writes: []string{"0123456", "0123456"},
expectedSegs: []string{"012345", "601234", "56"},
expectedValue: "01234560123456",
},
{
writes: []string{"0123456", "0123456"},
seeks: []int{-4, 0},
expectedSegs: []string{"012012", "3456"},
expectedValue: "0120123456",
},
{
writes: []string{"0123456", "0123456", "abcde"},
seeks: []int{0, -11, 0},
expectedSegs: []string{"012abc", "de1234", "56"},
expectedValue: "012abcde123456",
},
{
writes: []string{"0123456", "ab"},
seeks: []int{-4, 0},
expectedSegs: []string{"012ab5", "6"},
expectedValue: "012ab56",
},
}
testSegmentation(t, c, createObj, testCases)
}
type segmentTest struct {
writes []string
seeks []int
expectedSegs []string
expectedValue string
}
func testSegmentation(t *testing.T, c *swift.Connection, createObj func() swift.LargeObjectFile, testCases []segmentTest) {
ctx := context.Background()
var err error
runTestCase := func(tCase segmentTest) {
out := createObj()
defer func() {
err = c.LargeObjectDelete(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
}()
for i, data := range tCase.writes {
_, err = fmt.Fprint(out, data)
if err != nil {
t.Error(err)
}
if i < len(tCase.seeks)-1 {
_, err = out.Seek(int64(tCase.seeks[i]), os.SEEK_CUR)
if err != nil {
t.Error(err)
}
}
}
err = out.CloseWithContext(ctx)
if err != nil {
t.Error(err)
}
contents, err := c.ObjectGetString(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if contents != tCase.expectedValue {
t.Errorf("Contents wrong, expected %q, got: %q", tCase.expectedValue, contents)
}
container, objects, err := c.LargeObjectGetSegments(ctx, CONTAINER, OBJECT)
if err != nil {
t.Error(err)
}
if container != SEGMENTS_CONTAINER {
t.Errorf("Segments container wrong, expected %q, got: %q", SEGMENTS_CONTAINER, container)
}
_, headers, err := c.Object(ctx, CONTAINER, OBJECT)
if err != nil {
t.Fatal(err)
}
if headers.IsLargeObjectSLO() {
var info swift.SwiftInfo
info, err = c.QueryInfo(ctx)
if err != nil {
t.Fatal(err)
}
if info.SLOMinSegmentSize() > 4 {
t.Log("Skipping checking segments because SLO min segment size imposed by server is larger than wanted for tests.")
return
}
}
var segContents []string
for _, obj := range objects {
var value string
value, err = c.ObjectGetString(ctx, SEGMENTS_CONTAINER, obj.Name)
if err != nil {
t.Error(err)
}
segContents = append(segContents, value)
}
if !reflect.DeepEqual(segContents, tCase.expectedSegs) {
t.Errorf("Segments wrong, expected %#v, got: %#v", tCase.expectedSegs, segContents)
}
}
for _, tCase := range testCases {
runTestCase(tCase)
}
}
func TestContainerDelete(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionWithContainer(t)
defer rollback()
err := c.ContainerDelete(ctx, CONTAINER)
if err != nil {
t.Fatal(err)
}
err = c.ContainerDelete(ctx, CONTAINER)
if err != swift.ContainerNotFound {
t.Fatal("Expecting container not found", err)
}
_, _, err = c.Container(ctx, CONTAINER)
if err != swift.ContainerNotFound {
t.Fatal("Expecting container not found", err)
}
}
func TestUnAuthenticate(t *testing.T) {
ctx := context.Background()
c, rollback := makeConnectionAuth(t)
defer rollback()
c.UnAuthenticate()
if c.Authenticated() {
t.Fatal("Shouldn't be authenticated")
}
// Test re-authenticate
err := c.Authenticate(ctx)
if err != nil {
t.Fatal("ReAuth failed", err)
}
if !c.Authenticated() {
t.Fatal("Not authenticated")
}
}
|
[
"\"SWIFT_API_USER\"",
"\"SWIFT_API_KEY\"",
"\"SWIFT_AUTH_URL\"",
"\"SWIFT_REGION_NAME\"",
"\"SWIFT_ENDPOINT_TYPE\"",
"\"SWIFT_AUTH_INSECURE\"",
"\"SWIFT_CONNECTION_CHANNEL_TIMEOUT\"",
"\"SWIFT_DATA_CHANNEL_TIMEOUT\"",
"\"SWIFT_TENANT\"",
"\"SWIFT_API_DOMAIN\"",
"\"SWIFT_TENANT\"",
"\"SWIFT_TENANT_ID\"",
"\"SWIFT_AUTH_URL\"",
"\"SWIFT_AUTH_INSECURE\"",
"\"SWIFT_API_DOMAIN\"",
"\"SWIFT_TENANT_ID\"",
"\"SWIFT_TRUST_ID\"",
"\"SWIFT_TENANT_ID\"",
"\"SWIFT_API_DOMAIN_ID\"",
"\"SWIFT_TENANT\"",
"\"SWIFT_API_DOMAIN\"",
"\"SWIFT_TENANT\"",
"\"SWIFT_API_DOMAIN_ID\""
] |
[] |
[
"SWIFT_AUTH_URL",
"SWIFT_CONNECTION_CHANNEL_TIMEOUT",
"SWIFT_AUTH_INSECURE",
"SWIFT_API_DOMAIN",
"SWIFT_TRUST_ID",
"SWIFT_REGION_NAME",
"SWIFT_API_DOMAIN_ID",
"SWIFT_API_KEY",
"SWIFT_TENANT",
"SWIFT_ENDPOINT_TYPE",
"SWIFT_API_USER",
"SWIFT_DATA_CHANNEL_TIMEOUT",
"SWIFT_TENANT_ID"
] |
[]
|
["SWIFT_AUTH_URL", "SWIFT_CONNECTION_CHANNEL_TIMEOUT", "SWIFT_AUTH_INSECURE", "SWIFT_API_DOMAIN", "SWIFT_TRUST_ID", "SWIFT_REGION_NAME", "SWIFT_API_DOMAIN_ID", "SWIFT_API_KEY", "SWIFT_TENANT", "SWIFT_ENDPOINT_TYPE", "SWIFT_API_USER", "SWIFT_DATA_CHANNEL_TIMEOUT", "SWIFT_TENANT_ID"]
|
go
| 13 | 0 | |
scripts/CF_OCR.py
|
import logging
import json
import os
import time
import base64
from google.cloud import pubsub_v1
from google.cloud import vision, storage
from google.protobuf import json_format
import google.cloud.dlp_v2
def documentOCR(vision_client, gcs_source_uri, gcs_destination_uri, batch_size=20):
"""
Args:
vision_client:
gcs_source_uri:
gcs_destination_uri:
batch_size:
Returns:
"""
doc_title = gcs_source_uri.split('/')[-1].split('.pdf')[0]
# Supported mime_types are: 'application/pdf' and 'image/tiff'
mime_type = 'application/pdf'
# Feature in vision API
feature = vision.types.Feature(
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
gcs_source = vision.types.GcsSource(uri=gcs_source_uri)
input_config = vision.types.InputConfig(
gcs_source=gcs_source, mime_type=mime_type)
gcs_destination = vision.types.GcsDestination(uri=gcs_destination_uri)
output_config = vision.types.OutputConfig(
gcs_destination=gcs_destination, batch_size=batch_size)
async_request = vision.types.AsyncAnnotateFileRequest(
features=[feature], input_config=input_config,
output_config=output_config)
operation = vision_client.async_batch_annotate_files(
requests=[async_request])
# print('Waiting for the operation to finish.')
operation.result(timeout=180)
logging.info('Text extraction from document {} is completed.'.format(doc_title))
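# --- Illustrative usage sketch (not part of the original pipeline) ---
# A minimal example of how documentOCR above might be called directly; the
# bucket names and object paths are placeholders, not values this function uses.
def _example_document_ocr_call():
    client = vision.ImageAnnotatorClient()
    documentOCR(
        client,
        'gs://some-source-bucket/uploads/sample_document.pdf',
        'gs://some-destination-bucket/json/sample_document-',
        batch_size=20,
    )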
def readJsonResult(storage_client, bucket_name, doc_title):
"""
Parsing the json files and extract text.
Args:
storage_client:
bucket_name:
doc_title:
Returns:
all_text: str - Containing all text of the document
"""
gcs_src_prefix = 'json/' + '{}-'.format(doc_title)
# List objects with the given prefix.
bucket_client = storage_client.get_bucket(bucket_name)
blob_list = list(bucket_client.list_blobs(prefix=gcs_src_prefix))
all_text = ''
for blob in blob_list:
json_string = blob.download_as_string()
response = json_format.Parse(
json_string, vision.types.AnnotateFileResponse())
        # Concatenate the text from every page response in this output file.
        for page_response in response.responses:
            text_response = page_response.full_text_annotation.text
all_text += text_response
all_text += ' '
logging.info("Parsing of {} json doc was successful.".format(doc_title))
return all_text
def deterministicDeidentifyWithFpe(dlp_client, parent, text, info_types, surrogate_type, wrapped_key=None):
"""Uses the Data Loss Prevention API to deidentify sensitive data in a
string using Format Preserving Encryption (FPE).
Args:
dlp_client: DLP Client instantiation
parent: str - The parent resource name, for example projects/my-project-id.
text: str - text to deidentify
info_types: list type of sensitive data, such as a name, email address, telephone number, identification number,
or credit card number. https://cloud.google.com/dlp/docs/infotypes-reference
surrogate_type: The name of the surrogate custom info type to use. Only
necessary if you want to reverse the deidentification process. Can
be essentially any arbitrary string, as long as it doesn't appear
in your dataset otherwise.
        wrapped_key: base64-encoded AES-256 key bytes. The key is decoded and
            used directly as an unwrapped crypto key by the transformation.
    Returns:
        str - the deidentified text returned by the API.
"""
# The wrapped key is base64-encoded, but the library expects a binary
# string, so decode it here.
wrapped_key = base64.b64decode(wrapped_key)
# Construct inspect configuration dictionary
inspect_config = {
"info_types": [{"name": info_type} for info_type in info_types]
}
# Construct deidentify configuration dictionary
deidentify_config = {
"info_type_transformations": {
"transformations": [
{
"primitive_transformation": {
"crypto_deterministic_config": {
"crypto_key": {
"unwrapped": {
"key": wrapped_key
}
},
'surrogate_info_type': {"name": surrogate_type}
},
}
}
]
}
}
# Convert string to item
item = {"value": text}
# Call the API
response = dlp_client.deidentify_content(
parent=parent,
inspect_config=inspect_config,
deidentify_config=deidentify_config,
item=item,
)
# Print results
logging.info('Successful Redaction.')
return response.item.value
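# --- Illustrative usage sketch (not part of the original pipeline) ---
# One way the deidentification helper above might be exercised locally. The
# project id, sample text, info types and the locally generated AES-256 key
# are all placeholders / assumptions.
def _example_deidentify_call():
    from google.cloud import dlp_v2
    dlp = dlp_v2.DlpServiceClient()
    demo_key_b64 = base64.b64encode(os.urandom(32))  # base64-encoded 256-bit key
    return deterministicDeidentifyWithFpe(
        dlp_client=dlp,
        parent='projects/my-sample-project/locations/global',
        text='Patient John Doe lives at 1600 Example Street.',
        info_types=['PERSON_NAME', 'STREET_ADDRESS'],
        surrogate_type='REDACTED',
        wrapped_key=demo_key_b64,
    )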
def uploadBlob(storage_client, bucket_name, txt_content, destination_blob_name):
"""
Uploads a file to the bucket.
Args:
storage_client:
bucket_name:
txt_content: str - text
destination_blob_name: str - prefix
Returns:
"""
destination_blob_name = destination_blob_name.split('gs://{}/'.format(bucket_name))[-1]
bucket_client = storage_client.bucket(bucket_name)
blob = bucket_client.blob(destination_blob_name)
blob.upload_from_string(txt_content)
logging.info("Text uploaded to {}".format(destination_blob_name))
def publishMsg(publisher_client, project_id, text, doc_title, topic_name):
"""
Publish message with text and filename.
Args:
publisher_client: client instantiation
project_id: str -
text: str - Text contained in the document
doc_title: str -
topic_name: str -
Returns:
"""
# Compose the message to be sent to pubsub
message = {
'text': text,
'doc_title': doc_title,
}
# Publish message to PubSub
# Note: the message_data needs to be in bytestring
# Refer to the documentation:
# https://googleapis.dev/python/pubsub/latest/publisher/api/client.html
message_data = json.dumps(message).encode('utf-8')
topic_path = publisher_client.topic_path(project_id, topic_name)
# Publish method returns a future instance
future = publisher_client.publish(topic_path, data=message_data)
# We need to call result method to extract the message ID
# Refer to the documentation:
# https://googleapis.dev/python/pubsub/latest/publisher/api/futures.html#google.cloud.pubsub_v1.publisher.futures.Future
message_id = future.result()
logging.info("Message id: {} was published in topic: {}".format(message_id, topic_name))
def processPDFFile(file, context):
"""
This function will be triggered when a pdf file is uploaded to the GCS bucket of interest.
Args:
file (dict): Metadata of the changed file, provided by the triggering
Cloud Storage event.
context (google.cloud.functions.Context): Metadata of triggering event.
Returns:
None; the output is written to stdout and Stackdriver Logging
"""
start_time = time.time()
publisher_client = pubsub_v1.PublisherClient()
vision_client = vision.ImageAnnotatorClient()
storage_client = storage.Client()
dlp_client = google.cloud.dlp_v2.DlpServiceClient()
project_id = os.environ['GCP_PROJECT']
location = 'global' # or you can set it to os.environ['LOCATION']
RESULT_TOPIC = os.environ["RESULT_TOPIC"] # e.g pdf2text
src_bucket = file.get('bucket')
dest_bucket = 'aketari-covid19-data'
prefix_and_doc_title = file.get('name')
doc_title = prefix_and_doc_title.split('/')[-1].split('.')[0]
print('name is: {}'.format(prefix_and_doc_title))
# Step 1: Call OCR helper function
gcs_source_path = 'gs://' + src_bucket + '/' + prefix_and_doc_title
print('source gcs path: {}'.format(gcs_source_path))
print('=============================')
json_gcs_dest_path = 'gs://' + dest_bucket + '/json/' + doc_title + '-'
print('destination json path: {}'.format(json_gcs_dest_path))
print('=============================')
documentOCR(vision_client, gcs_source_path, json_gcs_dest_path)
print("completed OCR step!")
print('=============================')
# Step 2: Parse json file
text = readJsonResult(storage_client, dest_bucket, doc_title)
print("Completed json parsing step!")
print('=============================')
# Step 3: Redact text
parent = "{}/{}".format(project_id,location)
# TODO: replace gcs_prefix_secret with the correct location
gcs_prefix_secret = 'path/to/your/secret_file.txt'
INFO_TYPES = ["FIRST_NAME", "LAST_NAME", "FEMALE_NAME", "MALE_NAME",
"PERSON_NAME", "STREET_ADDRESS", "ITALY_FISCAL_CODE"]
bucket_client = storage_client.get_bucket(dest_bucket)
    AES_bytes = bucket_client.blob(gcs_prefix_secret).download_as_string()  # already bytes
base64_AES_bytes = base64.b64encode(AES_bytes)
    redacted_text = deterministicDeidentifyWithFpe(dlp_client=dlp_client, parent=parent,
                                                   text=text, info_types=INFO_TYPES,
                                                   surrogate_type="REDACTED",
                                                   wrapped_key=base64_AES_bytes)
print("Completed redaction step!")
print('=============================')
# Step 4: Publish on pubsub
topic_name = RESULT_TOPIC
publishMsg(publisher_client, project_id, text, doc_title, topic_name)
    publishMsg(publisher_client, project_id, redacted_text, doc_title, topic_name)
print("Completed pubsub messaging step!")
print('=============================')
    # Step 5: Save on GCS
upload_dest_prefix_for_text = 'raw_txt/{}.txt'.format(doc_title)
uploadBlob(storage_client, dest_bucket, text, upload_dest_prefix_for_text)
upload_dest_prefix_for_redacted_text = 'redacted_raw_txt/{}.txt'.format(doc_title)
uploadBlob(storage_client, dest_bucket, redacted_text, upload_dest_prefix_for_redacted_text)
print("Completed upload step!")
print('=============================')
print('File {} processed.'.format(doc_title))
end_time = time.time() - start_time
logging.info("Completion of the text extraction and redaction took: {} seconds".format(round(end_time, 1)))
|
[] |
[] |
[
"RESULT_TOPIC",
"LOCATION",
"GCP_PROJECT"
] |
[]
|
["RESULT_TOPIC", "LOCATION", "GCP_PROJECT"]
|
python
| 3 | 0 | |
aws_saml_auth/__init__.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import base64
import os
import sys
import logging
from six import print_ as print
from tzlocal import get_localzone
from aws_saml_auth import amazon
from aws_saml_auth import configuration
from aws_saml_auth import saml
from aws_saml_auth import util
with open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "VERSION"),
encoding="utf-8",
) as version_file:
version = version_file.read().strip()
def parse_args(args):
parser = argparse.ArgumentParser(
prog="aws-saml-auth",
description="Acquire temporary AWS credentials via SAML",
)
main_group = parser.add_mutually_exclusive_group()
main_group.add_argument(
"--redirect-server",
action="store_true",
help="Run the redirect server on port ($PORT)",
)
main_group.add_argument(
"-L", "--login-url", help="SAML Provider login url ($ASA_LOGIN_URL)"
)
parser.add_argument(
"-R", "--region", help="AWS region endpoint ($AWS_DEFAULT_REGION)"
)
duration_group = parser.add_mutually_exclusive_group()
duration_group.add_argument(
"-d",
"--duration",
type=int,
help="Credential duration in seconds (defaults to value of $ASA_DURATION, then falls back to 43200)",
)
duration_group.add_argument(
"--auto-duration",
action="store_true",
help="Tries to use the longest allowed duration ($ASA_AUTO_DURATION=1)",
)
parser.add_argument(
"-p",
"--profile",
help="AWS profile (defaults to value of $AWS_PROFILE, then falls back to 'default')",
)
parser.add_argument(
"-A", "--account", help="Filter for specific AWS account ($ASA_AWS_ACCOUNT)"
)
parser.add_argument("-q", "--quiet", action="store_true", help="Quiet output")
parser.add_argument(
"--saml-assertion",
dest="saml_assertion",
help="Base64 encoded SAML assertion to use",
)
parser.add_argument(
"--no-saml-cache",
dest="use_saml_cache",
action="store_false",
help="Do not cache the SAML Assertion ($ASA_NO_SAML_CACHE=1)",
)
print_group = parser.add_mutually_exclusive_group()
print_group.add_argument(
"--print-creds", action="store_true", help="Print Credentials"
)
print_group.add_argument(
"--credential-process",
action="store_true",
help="Output suitable for aws cli credential_process ($ASA_CREDENTIAL_PROCESS=1)",
)
parser.add_argument(
"--no-resolve-aliases",
dest="resolve_aliases",
action="store_false",
help="Do not resolve AWS account aliases. ($ASA_NO_RESOLVE_ALIASES=1)",
)
parser.add_argument("--port", type=int, help="Port for the redirect server ($PORT)")
role_group = parser.add_mutually_exclusive_group()
role_group.add_argument(
"--no-ask-role",
dest="ask_role",
action="store_false",
help="Never ask to pick the role ($ASA_NO_ASK_ROLE=1)",
)
role_group.add_argument(
"-r", "--role-arn", help="The ARN of the role to assume ($ASA_ROLE_ARN)"
)
parser.add_argument(
"-l",
"--log",
dest="log_level",
choices=["debug", "info", "warn"],
default="warn",
help="Select log level (default: %(default)s)",
)
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {version}".format(version=version),
)
return parser.parse_args(args)
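# Minimal illustrative sketch: parse_args consumes an argv-style list, so the
# flags defined above can be exercised programmatically. The profile name and
# region below are placeholder values.
def _example_parse_args():
    return parse_args(["-p", "work", "-R", "us-east-1", "--print-creds", "--no-ask-role"])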
def exit_if_unsupported_python():
if sys.version_info.major == 2 and sys.version_info.minor < 7:
logging.critical(
"%s requires Python 2.7 or higher. Please consider "
"upgrading. Support for Python 2.6 and lower was "
"dropped because this tool's dependencies dropped "
"support.",
__name__,
)
logging.critical(
"For debugging, it appears you're running: %s", sys.version_info
)
logging.critical(
"For more information, see: "
"https://github.com/cevoaustralia/aws-google-auth/"
"issues/41"
)
sys.exit(1)
def cli(cli_args):
try:
exit_if_unsupported_python()
args = parse_args(args=cli_args)
# Set up logging
logging.getLogger().setLevel(getattr(logging, args.log_level.upper(), None))
config = resolve_config(args)
if args.redirect_server:
from aws_saml_auth.redirect_server import start_redirect_server
start_redirect_server(config.port)
return
process_auth(args, config)
except amazon.ExpectedAmazonException as ex:
print(ex)
sys.exit(1)
except saml.ExpectedSamlException as ex:
print(ex)
sys.exit(1)
except KeyboardInterrupt:
pass
except Exception as ex:
logging.exception(ex)
def resolve_config(args):
# Shortening Convenience functions
coalesce = util.Util.coalesce
# Create a blank configuration object (has the defaults pre-filled)
config = configuration.Configuration()
# Have the configuration update itself via the ~/.aws/config on disk.
# Profile (Option priority = ARGS, ENV_VAR, DEFAULT)
config.profile = coalesce(args.profile, os.getenv("AWS_PROFILE"), config.profile)
# Now that we've established the profile, we can read the configuration and
# fill in all the other variables.
config.read(config.profile)
# Ask Role (Option priority = ARGS, ENV_VAR, DEFAULT)
config.ask_role = coalesce(
(False if os.getenv("ASA_NO_ASK_ROLE") != None else None),
args.ask_role,
config.ask_role,
)
# Do not cache the SAML Assertion (Option priority = ARGS, ENV_VAR, DEFAULT)
config.use_saml_cache = coalesce(
(False if os.getenv("ASA_NO_SAML_CACHE") != None else None),
args.use_saml_cache,
config.use_saml_cache,
)
# Duration (Option priority = ARGS, ENV_VAR, DEFAULT)
config.duration = int(
coalesce(args.duration, os.getenv("ASA_DURATION"), config.duration)
)
# Automatic duration (Option priority = ARGS, ENV_VAR, DEFAULT)
config.auto_duration = args.auto_duration or os.getenv("ASA_AUTO_DURATION") != None
# Login URL (Option priority = ARGS, ENV_VAR, DEFAULT)
config.login_url = coalesce(
args.login_url, os.getenv("ASA_LOGIN_URL"), config.login_url
)
# Region (Option priority = ARGS, ENV_VAR, DEFAULT)
config.region = coalesce(
args.region, os.getenv("AWS_DEFAULT_REGION"), config.region
)
# ROLE ARN (Option priority = ARGS, ENV_VAR, DEFAULT)
config.role_arn = coalesce(
args.role_arn, os.getenv("ASA_ROLE_ARN"), config.role_arn
)
# Resolve AWS aliases enabled (Option priority = ARGS, ENV_VAR, DEFAULT)
config.resolve_aliases = coalesce(
(False if os.getenv("ASA_NO_RESOLVE_ALIASES") != None else None),
args.resolve_aliases,
config.resolve_aliases,
)
# Account (Option priority = ARGS, ENV_VAR, DEFAULT)
config.account = coalesce(
args.account, os.getenv("ASA_AWS_ACCOUNT"), config.account
)
config.print_creds = coalesce(args.print_creds, config.print_creds)
# Quiet
config.quiet = coalesce(args.quiet, config.quiet)
config.port = int(coalesce(args.port, os.getenv("PORT"), config.port))
config.credential_process = (
args.credential_process or os.getenv("ASA_CREDENTIAL_PROCESS") != None
)
if config.credential_process:
config.quiet = True
config.ask_role = False
config.read_token_cache()
if config.use_saml_cache:
config.read_saml_cache()
return config
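# Minimal illustrative sketch of the ARGS > ENV_VAR > DEFAULT precedence used
# throughout resolve_config above. It assumes util.Util.coalesce returns its
# first non-None argument (which is how it is used in this module); the region
# values are placeholders.
def _example_option_precedence():
    coalesce = util.Util.coalesce
    cli_region = None  # pretend the user passed no -R flag
    return coalesce(cli_region, os.getenv("AWS_DEFAULT_REGION"), "us-west-2")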
def process_auth(args, config):
if config.region is None:
config.region = util.Util.get_input("AWS Region: ")
logging.debug("%s: region is: %s", __name__, config.region)
if config.login_url is None:
config.login_url = util.Util.get_input("Login URL: ")
logging.debug("%s: login url is: %s", __name__, config.login_url)
# If there is a valid cache and the user opted to use it, use that instead
    # of prompting the user for input (it will also ignore any set variables
# such as username or sp_id and idp_id, as those are built into the SAML
# response). The user does not need to be prompted for a password if the
# SAML cache is used.
if args.saml_assertion:
saml_xml = base64.b64decode(args.saml_assertion)
elif config.token_cache:
saml_xml = None
elif config.saml_cache:
saml_xml = config.saml_cache
logging.info("%s: SAML cache found", __name__)
else:
saml_client = saml.Saml(config)
saml_xml = saml_client.do_browser_saml()
# We now have a new SAML value that can get cached (If the user asked
# for it to be)
if config.use_saml_cache:
config.saml_cache = saml_xml
# The amazon_client now has the SAML assertion it needed (Either via the
# cache or freshly generated). From here, we can get the roles and continue
# the rest of the workflow regardless of cache.
amazon_client = amazon.Amazon(config, saml_xml)
if saml_xml is not None:
roles = amazon_client.roles
        # Determine the provider and the role ARN (if the one the user provided isn't an option)
if config.role_arn in roles and not config.ask_role:
config.provider = roles[config.role_arn]
else:
if config.account and config.resolve_aliases:
aliases = amazon_client.resolve_aws_aliases(roles)
config.role_arn, config.provider = util.Util.pick_a_role(
roles, aliases, config.account
)
elif config.account:
config.role_arn, config.provider = util.Util.pick_a_role(
roles, account=config.account
)
elif config.resolve_aliases:
aliases = amazon_client.resolve_aws_aliases(roles)
config.role_arn, config.provider = util.Util.pick_a_role(roles, aliases)
else:
config.role_arn, config.provider = util.Util.pick_a_role(roles)
if not config.quiet:
print("Assuming " + config.role_arn)
print(
"Credentials Expiration: "
+ format(amazon_client.expiration.astimezone(get_localzone()))
)
if config.credential_process:
amazon_client.print_credential_process()
config.write_token_cache(amazon_client)
elif config.print_creds:
amazon_client.print_export_line()
elif config.profile:
config.write(amazon_client)
config.write_saml_cache()
def main():
cli_args = sys.argv[1:]
cli(cli_args)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"PORT",
"ASA_CREDENTIAL_PROCESS",
"AWS_DEFAULT_REGION",
"ASA_AWS_ACCOUNT",
"ASA_NO_ASK_ROLE",
"ASA_DURATION",
"ASA_ROLE_ARN",
"AWS_PROFILE",
"ASA_LOGIN_URL",
"ASA_AUTO_DURATION",
"ASA_NO_RESOLVE_ALIASES",
"ASA_NO_SAML_CACHE"
] |
[]
|
["PORT", "ASA_CREDENTIAL_PROCESS", "AWS_DEFAULT_REGION", "ASA_AWS_ACCOUNT", "ASA_NO_ASK_ROLE", "ASA_DURATION", "ASA_ROLE_ARN", "AWS_PROFILE", "ASA_LOGIN_URL", "ASA_AUTO_DURATION", "ASA_NO_RESOLVE_ALIASES", "ASA_NO_SAML_CACHE"]
|
python
| 12 | 0 | |
main.go
|
// Examcollection-Downloader is a terminal UI based application to download
// old exams from the TU Berlin.
//
// The old exams and other material is provided by the freitagsrunde,
// https://wiki.freitagsrunde.org/Hauptseite.
//
// The CLI creates a folder ``old_exams`` with path specified by the
// ``-p`` flag (defaults to home directory).
//
// Keybindings:
//
// <arrow-keys> - move vertically and horizontally
// <enter> - open exams OR start download
// <space> - mark exam to download it
// <ctrl-d> - download all exams for given course
// <ctrl-c> - exit
//
package main
import (
"errors"
"flag"
"io"
"os"
"github.com/jroimartin/gocui"
"github.com/pascaldierich/examcollection-downloader/network"
"github.com/pascaldierich/examcollection-downloader/ui"
)
const url = "https://docs.freitagsrunde.org/Klausuren/"
var p = flag.String("p", "", "Path to download the exams.")
func main() {
flag.Parse()
// Get working file-path for application-folder.
path, err := initDirectory(*p)
if err != nil {
panic(err)
}
// Create new UI.
g, err := gocui.NewGui(gocui.OutputNormal)
if err != nil {
panic(err)
}
defer g.Close()
// Start view controller.
err = viewController(g, path)
if err != nil {
panic(err)
}
// Start main loop.
if err := g.MainLoop(); err != nil && err != gocui.ErrQuit {
panic(err)
}
}
func viewController(g *gocui.Gui, path string) (err error) {
var cl [][2]string // Course list
var el [][2]string // Corresponding exams list
lu := make(map[string]int) // Lookup map for course list
le := make(map[string]int) // Lookup map for exams list
var courseName string // Name of current selected course
var courseLink string // Relative link to current selected course
var selectedExams [][2]string // Selected Exams
cl, err = network.GetCourseList(url)
if err != nil {
return
}
// Fill courses lookup map
for i, c := range cl {
lu[c[network.NameField]] = i
}
// View only needs the course-names, not the links.
var names []string
for _, n := range cl {
names = append(names, n[network.NameField])
}
// Set view's functions:
// ui.getExams returns the list of exam names
// for the currently selected course.
ui.GetExams = func() ([]string, error) {
if courseLink == "" || courseName == "" {
return nil, errors.New("no course selected")
}
el, err = network.GetExamList(url + courseLink)
if err != nil {
return nil, err
}
// Fill exams lookup map
for i, e := range el {
le[e[network.NameField]] = i
}
// Empty selected exams
selectedExams = selectedExams[:0]
// Return to view only the exam names, not the links.
var names []string
for _, n := range el {
names = append(names, n[network.NameField])
}
return names, nil
}
	// ui.DownloadExams downloads and saves the selected exams in the corresponding course folder.
ui.DownloadExams = func() (err error) {
for _, e := range selectedExams {
en := e[network.LinkField]
f, err := network.DownloadFile(url + courseLink + en)
if err != nil {
return err
}
defer f.Close()
err = createDir(path + courseName)
if err != nil {
return err
}
			err = saveFile(f, path+courseName+en)
			if err != nil {
				return err
			}
}
return
}
// ui.DownloadAllExams downloads and saves all exams in corresponding course folder.
ui.DownloadAllExams = func() (err error) {
// Fill selected exams list
for _, s := range el {
selectedExams = append(selectedExams, s)
}
return ui.DownloadExams()
}
	// ui.CourseSelected sets the courseName and courseLink variables.
ui.CourseSelected = func(cn string) error {
i, ok := lu[cn]
if !ok {
return errors.New("could not find courseName in dataset. View edited?")
}
courseLink = cl[i][network.LinkField]
courseName = cl[i][network.NameField]
return nil
}
	// ui.ExamSelected adds a new exam to the "to-download-list".
ui.ExamSelected = func(en string) error {
i, ok := le[en]
if !ok {
return errors.New(en)
}
selectedExams = append(selectedExams, el[i])
return nil
}
// Init UI.
err = ui.Init(g, names)
if err != nil {
panic(err)
}
return
}
// saveFile writes `f` to path `p`
func saveFile(f io.Reader, p string) (err error) {
	out, err := os.Create(p)
	if err != nil {
		return
	}
	defer out.Close()
	_, err = io.Copy(out, f)
return
}
// initDirectory creates (if needed) and returns the application path.
func initDirectory(home string) (path string, err error) {
if home == "" {
home, err = getHomeDir()
if err != nil {
return
}
}
path = home + "/old_exams/"
err = createDir(path)
return
}
// getHomeDir returns the path to the user's home directory.
func getHomeDir() (dir string, err error) {
dir = os.Getenv("HOME")
if dir == "" {
err = errors.New("No HomeDir")
}
return
}
// createDir checks if directory exists, creates otherwise.
func createDir(p string) (err error) {
_, err = os.Stat(p)
if err == nil {
return
}
if os.IsNotExist(err) {
err = os.Mkdir(p, 0744)
}
return
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
pkg/server/server.go
|
package server
import (
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/facebookgo/inject"
"golang.org/x/sync/errgroup"
"github.com/grafana/grafana/pkg/api"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/bus"
_ "github.com/grafana/grafana/pkg/extensions"
"github.com/grafana/grafana/pkg/infra/localcache"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/metrics"
_ "github.com/grafana/grafana/pkg/infra/metrics"
_ "github.com/grafana/grafana/pkg/infra/remotecache"
_ "github.com/grafana/grafana/pkg/infra/serverlock"
_ "github.com/grafana/grafana/pkg/infra/tracing"
_ "github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/login"
"github.com/grafana/grafana/pkg/login/social"
"github.com/grafana/grafana/pkg/middleware"
_ "github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/registry"
_ "github.com/grafana/grafana/pkg/services/alerting"
_ "github.com/grafana/grafana/pkg/services/auth"
_ "github.com/grafana/grafana/pkg/services/cleanup"
_ "github.com/grafana/grafana/pkg/services/notifications"
_ "github.com/grafana/grafana/pkg/services/provisioning"
_ "github.com/grafana/grafana/pkg/services/rendering"
_ "github.com/grafana/grafana/pkg/services/search"
_ "github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util/errutil"
)
// Config contains parameters for the New function.
type Config struct {
ConfigFile string
HomePath string
PidFile string
Version string
Commit string
BuildBranch string
Listener net.Listener
}
// New returns a new instance of Server.
func New(cfg Config) (*Server, error) {
rootCtx, shutdownFn := context.WithCancel(context.Background())
childRoutines, childCtx := errgroup.WithContext(rootCtx)
s := &Server{
context: childCtx,
shutdownFn: shutdownFn,
childRoutines: childRoutines,
log: log.New("server"),
cfg: setting.NewCfg(),
configFile: cfg.ConfigFile,
homePath: cfg.HomePath,
pidFile: cfg.PidFile,
version: cfg.Version,
commit: cfg.Commit,
buildBranch: cfg.BuildBranch,
}
if cfg.Listener != nil {
if err := s.init(&cfg); err != nil {
return nil, err
}
}
return s, nil
}
// Server is responsible for managing the lifecycle of services.
type Server struct {
context context.Context
shutdownFn context.CancelFunc
childRoutines *errgroup.Group
log log.Logger
cfg *setting.Cfg
shutdownReason string
shutdownInProgress bool
isInitialized bool
mtx sync.Mutex
configFile string
homePath string
pidFile string
version string
commit string
buildBranch string
HTTPServer *api.HTTPServer `inject:""`
}
// init initializes the server and its services.
func (s *Server) init(cfg *Config) error {
s.mtx.Lock()
defer s.mtx.Unlock()
if s.isInitialized {
return nil
}
s.isInitialized = true
s.loadConfiguration()
s.writePIDFile()
if err := metrics.SetEnvironmentInformation(s.cfg.MetricsGrafanaEnvironmentInfo); err != nil {
return err
}
login.Init()
social.NewOAuthService()
services := registry.GetServices()
if err := s.buildServiceGraph(services); err != nil {
return err
}
// Initialize services.
for _, service := range services {
if registry.IsDisabled(service.Instance) {
continue
}
if cfg != nil {
if httpS, ok := service.Instance.(*api.HTTPServer); ok {
// Configure the api.HTTPServer if necessary
// Hopefully we can find a better solution, maybe with a more advanced DI framework, f.ex. Dig?
if cfg.Listener != nil {
s.log.Debug("Using provided listener for HTTP server")
httpS.Listener = cfg.Listener
}
}
}
if err := service.Instance.Init(); err != nil {
return errutil.Wrapf(err, "Service init failed")
}
}
return nil
}
// Run initializes and starts services. This will block until all services have
// exited. To initiate shutdown, call the Shutdown method in another goroutine.
func (s *Server) Run() (err error) {
if err = s.init(nil); err != nil {
return
}
services := registry.GetServices()
// Start background services.
for _, svc := range services {
service, ok := svc.Instance.(registry.BackgroundService)
if !ok {
continue
}
if registry.IsDisabled(svc.Instance) {
continue
}
// Variable is needed for accessing loop variable in callback
descriptor := svc
s.childRoutines.Go(func() error {
// Don't start new services when server is shutting down.
if s.shutdownInProgress {
return nil
}
err := service.Run(s.context)
if err != nil {
// Mark that we are in shutdown mode
// So no more services are started
s.shutdownInProgress = true
if err != context.Canceled {
// Server has crashed.
s.log.Error("Stopped "+descriptor.Name, "reason", err)
} else {
s.log.Debug("Stopped "+descriptor.Name, "reason", err)
}
return err
}
return nil
})
}
defer func() {
s.log.Debug("Waiting on services...")
if waitErr := s.childRoutines.Wait(); waitErr != nil && !errors.Is(waitErr, context.Canceled) {
s.log.Error("A service failed", "err", waitErr)
if err == nil {
err = waitErr
}
}
}()
s.notifySystemd("READY=1")
return nil
}
func (s *Server) Shutdown(reason string) {
s.log.Info("Shutdown started", "reason", reason)
s.shutdownReason = reason
s.shutdownInProgress = true
// call cancel func on root context
s.shutdownFn()
// wait for child routines
if err := s.childRoutines.Wait(); err != nil && !errors.Is(err, context.Canceled) {
s.log.Error("Failed waiting for services to shutdown", "err", err)
}
}
// ExitCode returns an exit code for a given error.
func (s *Server) ExitCode(reason error) int {
code := 1
if reason == context.Canceled && s.shutdownReason != "" {
		reason = errors.New(s.shutdownReason)
code = 0
}
s.log.Error("Server shutdown", "reason", reason)
return code
}
// writePIDFile retrieves the current process ID and writes it to file.
func (s *Server) writePIDFile() {
if s.pidFile == "" {
return
}
// Ensure the required directory structure exists.
err := os.MkdirAll(filepath.Dir(s.pidFile), 0700)
if err != nil {
s.log.Error("Failed to verify pid directory", "error", err)
os.Exit(1)
}
// Retrieve the PID and write it to file.
pid := strconv.Itoa(os.Getpid())
if err := ioutil.WriteFile(s.pidFile, []byte(pid), 0644); err != nil {
s.log.Error("Failed to write pidfile", "error", err)
os.Exit(1)
}
s.log.Info("Writing PID file", "path", s.pidFile, "pid", pid)
}
// buildServiceGraph builds a graph of services and their dependencies.
func (s *Server) buildServiceGraph(services []*registry.Descriptor) error {
// Specify service dependencies.
objs := []interface{}{
bus.GetBus(),
s.cfg,
routing.NewRouteRegister(middleware.RequestMetrics(s.cfg), middleware.RequestTracing),
localcache.New(5*time.Minute, 10*time.Minute),
s,
}
for _, service := range services {
objs = append(objs, service.Instance)
}
var serviceGraph inject.Graph
// Provide services and their dependencies to the graph.
for _, obj := range objs {
if err := serviceGraph.Provide(&inject.Object{Value: obj}); err != nil {
return errutil.Wrapf(err, "Failed to provide object to the graph")
}
}
// Resolve services and their dependencies.
if err := serviceGraph.Populate(); err != nil {
return errutil.Wrapf(err, "Failed to populate service dependencies")
}
return nil
}
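// Illustrative sketch (an assumption about the DI style, not taken from this excerpt):
// with facebookgo/inject-style graphs, a service declares its dependencies as exported
// fields tagged `inject:""`, and Populate fills them from the objects provided above.
//
//   type MyService struct {
//       Cfg           *setting.Cfg          `inject:""`
//       RouteRegister routing.RouteRegister `inject:""`
//   }
//
//   func (s *MyService) Init() error { return nil }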
// loadConfiguration loads settings and configuration from config files.
func (s *Server) loadConfiguration() {
args := &setting.CommandLineArgs{
Config: s.configFile,
HomePath: s.homePath,
Args: flag.Args(),
}
if err := s.cfg.Load(args); err != nil {
fmt.Fprintf(os.Stderr, "Failed to start grafana. error: %s\n", err.Error())
os.Exit(1)
}
s.log.Info("Starting "+setting.ApplicationName,
"version", s.version,
"commit", s.commit,
"branch", s.buildBranch,
"compiled", time.Unix(setting.BuildStamp, 0),
)
s.cfg.LogConfigSources()
}
// notifySystemd sends state notifications to systemd.
func (s *Server) notifySystemd(state string) {
notifySocket := os.Getenv("NOTIFY_SOCKET")
if notifySocket == "" {
s.log.Debug(
"NOTIFY_SOCKET environment variable empty or unset, can't send systemd notification")
return
}
socketAddr := &net.UnixAddr{
Name: notifySocket,
Net: "unixgram",
}
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
if err != nil {
s.log.Warn("Failed to connect to systemd", "err", err, "socket", notifySocket)
return
}
defer conn.Close()
_, err = conn.Write([]byte(state))
if err != nil {
s.log.Warn("Failed to write notification to systemd", "err", err)
}
}
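// Illustrative note: notifySystemd follows the sd_notify protocol, i.e. newline-separated
// KEY=VALUE assignments written as a single datagram to the socket named by NOTIFY_SOCKET.
// A shutdown notification would be sent the same way, for example:
//
//   s.notifySystemd("STOPPING=1")
//
// Whether this server sends states other than "READY=1" is not shown in this excerpt.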
| ["\"NOTIFY_SOCKET\""] | [] | ["NOTIFY_SOCKET"] | [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
clients/google-api-services-bigquery/v2/1.31.0/com/google/api/services/bigquery/Bigquery.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.bigquery;
/**
* Service definition for Bigquery (v2).
*
* <p>
* A data platform for customers to create, manage, share and query data.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/bigquery/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link BigqueryRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class Bigquery extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the BigQuery API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://bigquery.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://bigquery.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "bigquery/v2/";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch/bigquery/v2";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Bigquery(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
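// Illustrative sketch (assumptions noted): a typical construction path uses the Builder,
// e.g. with a trusted transport and a Gson-based JSON factory plus an application name.
// Credential/initializer wiring is omitted here and would normally supply OAuth 2.0 auth.
//
//   Bigquery bigquery = new Bigquery.Builder(
//           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
//           new com.google.api.client.json.gson.GsonFactory(),
//           /* httpRequestInitializer= */ null)
//       .setApplicationName("my-app")
//       .build();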
/**
* @param builder builder
*/
Bigquery(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Datasets collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Datasets.List request = bigquery.datasets().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Datasets datasets() {
return new Datasets();
}
/**
* The "datasets" collection of methods.
*/
public class Datasets {
/**
* Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must
* delete all its tables, either manually or by specifying deleteContents. Immediately after
* deletion, you can create another dataset with the same name.
*
* Create a request for the method "datasets.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the dataset being deleted
* @param datasetId Dataset ID of dataset being deleted
* @return the request
*/
public Delete delete(java.lang.String projectId, java.lang.String datasetId) throws java.io.IOException {
Delete result = new Delete(projectId, datasetId);
initialize(result);
return result;
}
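// Illustrative usage sketch (project and dataset IDs are placeholders): deleting a
// dataset together with any tables it still contains.
//
//   bigquery.datasets().delete("my-project", "my_dataset")
//       .setDeleteContents(true)
//       .execute();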
public class Delete extends BigqueryRequest<Void> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}";
/**
* Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must
* delete all its tables, either manually or by specifying deleteContents. Immediately after
* deletion, you can create another dataset with the same name.
*
* Create a request for the method "datasets.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the dataset being deleted
* @param datasetId Dataset ID of dataset being deleted
* @since 1.13
*/
protected Delete(java.lang.String projectId, java.lang.String datasetId) {
super(Bigquery.this, "DELETE", REST_PATH, null, Void.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUserIp(java.lang.String userIp) {
return (Delete) super.setUserIp(userIp);
}
/** Project ID of the dataset being deleted */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the dataset being deleted
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the dataset being deleted */
public Delete setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of dataset being deleted */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of dataset being deleted
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of dataset being deleted */
public Delete setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/**
* If True, delete all the tables in the dataset. If False and the dataset contains tables,
* the request will fail. Default is False
*/
@com.google.api.client.util.Key
private java.lang.Boolean deleteContents;
/** If True, delete all the tables in the dataset. If False and the dataset contains tables, the
request will fail. Default is False
*/
public java.lang.Boolean getDeleteContents() {
return deleteContents;
}
/**
* If True, delete all the tables in the dataset. If False and the dataset contains tables,
* the request will fail. Default is False
*/
public Delete setDeleteContents(java.lang.Boolean deleteContents) {
this.deleteContents = deleteContents;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Returns the dataset specified by datasetID.
*
* Create a request for the method "datasets.get".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the requested dataset
* @param datasetId Dataset ID of the requested dataset
* @return the request
*/
public Get get(java.lang.String projectId, java.lang.String datasetId) throws java.io.IOException {
Get result = new Get(projectId, datasetId);
initialize(result);
return result;
}
public class Get extends BigqueryRequest<com.google.api.services.bigquery.model.Dataset> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}";
/**
* Returns the dataset specified by datasetID.
*
* Create a request for the method "datasets.get".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the requested dataset
* @param datasetId Dataset ID of the requested dataset
* @since 1.13
*/
protected Get(java.lang.String projectId, java.lang.String datasetId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.Dataset.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUserIp(java.lang.String userIp) {
return (Get) super.setUserIp(userIp);
}
/** Project ID of the requested dataset */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the requested dataset
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the requested dataset */
public Get setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the requested dataset */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the requested dataset
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the requested dataset */
public Get setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Creates a new empty dataset.
*
* Create a request for the method "datasets.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the new dataset
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @return the request
*/
public Insert insert(java.lang.String projectId, com.google.api.services.bigquery.model.Dataset content) throws java.io.IOException {
Insert result = new Insert(projectId, content);
initialize(result);
return result;
}
public class Insert extends BigqueryRequest<com.google.api.services.bigquery.model.Dataset> {
private static final String REST_PATH = "projects/{projectId}/datasets";
/**
* Creates a new empty dataset.
*
* Create a request for the method "datasets.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the new dataset
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @since 1.13
*/
protected Insert(java.lang.String projectId, com.google.api.services.bigquery.model.Dataset content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Dataset.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUserIp(java.lang.String userIp) {
return (Insert) super.setUserIp(userIp);
}
/** Project ID of the new dataset */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the new dataset
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the new dataset */
public Insert setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all datasets in the specified project to which you have been granted the READER dataset
* role.
*
* Create a request for the method "datasets.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the datasets to be listed
* @return the request
*/
public List list(java.lang.String projectId) throws java.io.IOException {
List result = new List(projectId);
initialize(result);
return result;
}
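// Illustrative usage sketch (identifiers are placeholders): paging through datasets by
// passing each response's next page token back into the request, optionally with a
// label filter as described on the filter parameter below.
//
//   String pageToken = null;
//   do {
//     com.google.api.services.bigquery.model.DatasetList page = bigquery.datasets().list("my-project")
//         .setFilter("labels.department:receiving")
//         .setMaxResults(100L)
//         .setPageToken(pageToken)
//         .execute();
//     // ... consume page.getDatasets() ...
//     pageToken = page.getNextPageToken();
//   } while (pageToken != null);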
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.DatasetList> {
private static final String REST_PATH = "projects/{projectId}/datasets";
/**
* Lists all datasets in the specified project to which you have been granted the READER dataset
* role.
*
* Create a request for the method "datasets.list".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the datasets to be listed
* @since 1.13
*/
protected List(java.lang.String projectId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.DatasetList.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Project ID of the datasets to be listed */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the datasets to be listed
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the datasets to be listed */
public List setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Whether to list all datasets, including hidden ones */
@com.google.api.client.util.Key
private java.lang.Boolean all;
/** Whether to list all datasets, including hidden ones
*/
public java.lang.Boolean getAll() {
return all;
}
/** Whether to list all datasets, including hidden ones */
public List setAll(java.lang.Boolean all) {
this.all = all;
return this;
}
/**
* An expression for filtering the results of the request by label. The syntax is
* "labels.[:]". Multiple filters can be ANDed together by connecting with a space. Example:
* "labels.department:receiving labels.active". See Filtering datasets using labels for
* details.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** An expression for filtering the results of the request by label. The syntax is "labels.<name>[:<value>]".
Multiple filters can be ANDed together by connecting with a space. Example:
"labels.department:receiving labels.active". See Filtering datasets using labels for details.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* An expression for filtering the results of the request by label. The syntax is
* "labels.[:]". Multiple filters can be ANDed together by connecting with a space. Example:
* "labels.department:receiving labels.active". See Filtering datasets using labels for
* details.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The maximum number of results to return */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** The maximum number of results to return
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** The maximum number of results to return */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates information in an existing dataset. The update method replaces the entire dataset
* resource, whereas the patch method only replaces fields that are provided in the submitted
* dataset resource. This method supports patch semantics.
*
* Create a request for the method "datasets.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the dataset being updated
* @param datasetId Dataset ID of the dataset being updated
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @return the request
*/
public Patch patch(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Dataset content) throws java.io.IOException {
Patch result = new Patch(projectId, datasetId, content);
initialize(result);
return result;
}
public class Patch extends BigqueryRequest<com.google.api.services.bigquery.model.Dataset> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}";
/**
* Updates information in an existing dataset. The update method replaces the entire dataset
* resource, whereas the patch method only replaces fields that are provided in the submitted
* dataset resource. This method supports patch semantics.
*
* Create a request for the method "datasets.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the dataset being updated
* @param datasetId Dataset ID of the dataset being updated
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @since 1.13
*/
protected Patch(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Dataset content) {
super(Bigquery.this, "PATCH", REST_PATH, content, com.google.api.services.bigquery.model.Dataset.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUserIp(java.lang.String userIp) {
return (Patch) super.setUserIp(userIp);
}
/** Project ID of the dataset being updated */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the dataset being updated
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the dataset being updated */
public Patch setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the dataset being updated */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the dataset being updated
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the dataset being updated */
public Patch setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
/**
* Updates information in an existing dataset. The update method replaces the entire dataset
* resource, whereas the patch method only replaces fields that are provided in the submitted
* dataset resource.
*
* Create a request for the method "datasets.update".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the dataset being updated
* @param datasetId Dataset ID of the dataset being updated
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @return the request
*/
public Update update(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Dataset content) throws java.io.IOException {
Update result = new Update(projectId, datasetId, content);
initialize(result);
return result;
}
public class Update extends BigqueryRequest<com.google.api.services.bigquery.model.Dataset> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}";
/**
* Updates information in an existing dataset. The update method replaces the entire dataset
* resource, whereas the patch method only replaces fields that are provided in the submitted
* dataset resource.
*
* Create a request for the method "datasets.update".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the dataset being updated
* @param datasetId Dataset ID of the dataset being updated
* @param content the {@link com.google.api.services.bigquery.model.Dataset}
* @since 1.13
*/
protected Update(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Dataset content) {
super(Bigquery.this, "PUT", REST_PATH, content, com.google.api.services.bigquery.model.Dataset.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUserIp(java.lang.String userIp) {
return (Update) super.setUserIp(userIp);
}
/** Project ID of the dataset being updated */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the dataset being updated
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the dataset being updated */
public Update setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the dataset being updated */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the dataset being updated
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the dataset being updated */
public Update setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Jobs collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Jobs.List request = bigquery.jobs().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Jobs jobs() {
return new Jobs();
}
/**
* The "jobs" collection of methods.
*/
public class Jobs {
/**
* Requests that a job be cancelled. This call will return immediately, and the client will need to
* poll for the job status to see if the cancel completed successfully. Cancelled jobs may still
* incur costs.
*
* Create a request for the method "jobs.cancel".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Cancel#execute()} method to invoke the remote operation.
*
* @param projectId [Required] Project ID of the job to cancel
* @param jobId [Required] Job ID of the job to cancel
* @return the request
*/
public Cancel cancel(java.lang.String projectId, java.lang.String jobId) throws java.io.IOException {
Cancel result = new Cancel(projectId, jobId);
initialize(result);
return result;
}
public class Cancel extends BigqueryRequest<com.google.api.services.bigquery.model.JobCancelResponse> {
private static final String REST_PATH = "projects/{projectId}/jobs/{jobId}/cancel";
/**
* Requests that a job be cancelled. This call will return immediately, and the client will need
* to poll for the job status to see if the cancel completed successfully. Cancelled jobs may
* still incur costs.
*
* Create a request for the method "jobs.cancel".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Cancel#execute()} method to invoke the remote operation.
* <p> {@link
* Cancel#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId [Required] Project ID of the job to cancel
* @param jobId [Required] Job ID of the job to cancel
* @since 1.13
*/
protected Cancel(java.lang.String projectId, java.lang.String jobId) {
super(Bigquery.this, "POST", REST_PATH, null, com.google.api.services.bigquery.model.JobCancelResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.jobId = com.google.api.client.util.Preconditions.checkNotNull(jobId, "Required parameter jobId must be specified.");
}
@Override
public Cancel setAlt(java.lang.String alt) {
return (Cancel) super.setAlt(alt);
}
@Override
public Cancel setFields(java.lang.String fields) {
return (Cancel) super.setFields(fields);
}
@Override
public Cancel setKey(java.lang.String key) {
return (Cancel) super.setKey(key);
}
@Override
public Cancel setOauthToken(java.lang.String oauthToken) {
return (Cancel) super.setOauthToken(oauthToken);
}
@Override
public Cancel setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Cancel) super.setPrettyPrint(prettyPrint);
}
@Override
public Cancel setQuotaUser(java.lang.String quotaUser) {
return (Cancel) super.setQuotaUser(quotaUser);
}
@Override
public Cancel setUserIp(java.lang.String userIp) {
return (Cancel) super.setUserIp(userIp);
}
/** [Required] Project ID of the job to cancel */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** [Required] Project ID of the job to cancel
*/
public java.lang.String getProjectId() {
return projectId;
}
/** [Required] Project ID of the job to cancel */
public Cancel setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** [Required] Job ID of the job to cancel */
@com.google.api.client.util.Key
private java.lang.String jobId;
/** [Required] Job ID of the job to cancel
*/
public java.lang.String getJobId() {
return jobId;
}
/** [Required] Job ID of the job to cancel */
public Cancel setJobId(java.lang.String jobId) {
this.jobId = jobId;
return this;
}
/**
* The geographic location of the job. Required except for US and EU. See details at
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
@com.google.api.client.util.Key
private java.lang.String location;
/** The geographic location of the job. Required except for US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public java.lang.String getLocation() {
return location;
}
/**
* The geographic location of the job. Required except for US and EU. See details at
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public Cancel setLocation(java.lang.String location) {
this.location = location;
return this;
}
@Override
public Cancel set(String parameterName, Object value) {
return (Cancel) super.set(parameterName, value);
}
}
/**
* Requests that a job is deleted. This call will return when the job is deleted. This method is
* available in limited preview.
*
* Create a request for the method "jobs.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the job to be deleted.
* @param jobId Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child
* jobs will be deleted as well. Deletion of child jobs directly is not allowed.
* @return the request
*/
public Delete delete(java.lang.String projectId, java.lang.String jobId) throws java.io.IOException {
Delete result = new Delete(projectId, jobId);
initialize(result);
return result;
}
public class Delete extends BigqueryRequest<Void> {
private static final String REST_PATH = "projects/{+projectId}/jobs/{+jobId}/delete";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern JOB_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Requests that a job is deleted. This call will return when the job is deleted. This method is
* available in limited preview.
*
* Create a request for the method "jobs.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the job to be deleted.
* @param jobId Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child
* jobs will be deleted as well. Deletion of child jobs directly is not allowed.
* @since 1.13
*/
protected Delete(java.lang.String projectId, java.lang.String jobId) {
super(Bigquery.this, "DELETE", REST_PATH, null, Void.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.jobId = com.google.api.client.util.Preconditions.checkNotNull(jobId, "Required parameter jobId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(JOB_ID_PATTERN.matcher(jobId).matches(),
"Parameter jobId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUserIp(java.lang.String userIp) {
return (Delete) super.setUserIp(userIp);
}
/** Required. Project ID of the job to be deleted. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the job to be deleted.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the job to be deleted. */
public Delete setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/**
* Required. Job ID of the job to be deleted. If this is a parent job which has child jobs,
* all child jobs will be deleted as well. Deletion of child jobs directly is not allowed.
*/
@com.google.api.client.util.Key
private java.lang.String jobId;
/** Required. Job ID of the job to be deleted. If this is a parent job which has child jobs, all child
jobs will be deleted as well. Deletion of child jobs directly is not allowed.
*/
public java.lang.String getJobId() {
return jobId;
}
/**
* Required. Job ID of the job to be deleted. If this is a parent job which has child jobs,
* all child jobs will be deleted as well. Deletion of child jobs directly is not allowed.
*/
public Delete setJobId(java.lang.String jobId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(JOB_ID_PATTERN.matcher(jobId).matches(),
"Parameter jobId must conform to the pattern " +
"^[^/]+$");
}
this.jobId = jobId;
return this;
}
/**
* The geographic location of the job. Required. See details at:
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
@com.google.api.client.util.Key
private java.lang.String location;
/** The geographic location of the job. Required. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public java.lang.String getLocation() {
return location;
}
/**
* The geographic location of the job. Required. See details at:
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public Delete setLocation(java.lang.String location) {
this.location = location;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Returns information about a specific job. Job information is available for a six month period
* after creation. Requires that you're the person who ran the job, or have the Is Owner project
* role.
*
* Create a request for the method "jobs.get".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param projectId [Required] Project ID of the requested job
* @param jobId [Required] Job ID of the requested job
* @return the request
*/
public Get get(java.lang.String projectId, java.lang.String jobId) throws java.io.IOException {
Get result = new Get(projectId, jobId);
initialize(result);
return result;
}
public class Get extends BigqueryRequest<com.google.api.services.bigquery.model.Job> {
private static final String REST_PATH = "projects/{projectId}/jobs/{jobId}";
/**
* Returns information about a specific job. Job information is available for a six month period
* after creation. Requires that you're the person who ran the job, or have the Is Owner project
* role.
*
* Create a request for the method "jobs.get".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId [Required] Project ID of the requested job
* @param jobId [Required] Job ID of the requested job
* @since 1.13
*/
protected Get(java.lang.String projectId, java.lang.String jobId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.Job.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.jobId = com.google.api.client.util.Preconditions.checkNotNull(jobId, "Required parameter jobId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUserIp(java.lang.String userIp) {
return (Get) super.setUserIp(userIp);
}
/** [Required] Project ID of the requested job */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** [Required] Project ID of the requested job
*/
public java.lang.String getProjectId() {
return projectId;
}
/** [Required] Project ID of the requested job */
public Get setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** [Required] Job ID of the requested job */
@com.google.api.client.util.Key
private java.lang.String jobId;
/** [Required] Job ID of the requested job
*/
public java.lang.String getJobId() {
return jobId;
}
/** [Required] Job ID of the requested job */
public Get setJobId(java.lang.String jobId) {
this.jobId = jobId;
return this;
}
/**
* The geographic location of the job. Required except for US and EU. See details at
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
@com.google.api.client.util.Key
private java.lang.String location;
/** The geographic location of the job. Required except for US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public java.lang.String getLocation() {
return location;
}
/**
* The geographic location of the job. Required except for US and EU. See details at
* https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public Get setLocation(java.lang.String location) {
this.location = location;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Retrieves the results of a query job.
*
* Create a request for the method "jobs.getQueryResults".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link GetQueryResults#execute()} method to invoke the remote operation.
*
* @param projectId [Required] Project ID of the query job
* @param jobId [Required] Job ID of the query job
* @return the request
*/
public GetQueryResults getQueryResults(java.lang.String projectId, java.lang.String jobId) throws java.io.IOException {
GetQueryResults result = new GetQueryResults(projectId, jobId);
initialize(result);
return result;
}
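// Illustrative usage sketch (IDs are placeholders): polling for the results of a query
// job, checking jobComplete on each response, with timeoutMs bounding each call.
//
//   com.google.api.services.bigquery.model.GetQueryResultsResponse resp;
//   do {
//     resp = bigquery.jobs().getQueryResults("my-project", jobId)
//         .setLocation("US")
//         .setTimeoutMs(10000L)
//         .execute();
//   } while (!resp.getJobComplete());
//   // resp.getRows() now holds the first page; follow resp.getPageToken() for more.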
public class GetQueryResults extends BigqueryRequest<com.google.api.services.bigquery.model.GetQueryResultsResponse> {
private static final String REST_PATH = "projects/{projectId}/queries/{jobId}";
/**
* Retrieves the results of a query job.
*
* Create a request for the method "jobs.getQueryResults".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link GetQueryResults#execute()} method to invoke the remote
* operation. <p> {@link GetQueryResults#initialize(com.google.api.client.googleapis.services.Abst
* ractGoogleClientRequest)} must be called to initialize this instance immediately after invoking
* the constructor. </p>
*
* @param projectId [Required] Project ID of the query job
* @param jobId [Required] Job ID of the query job
* @since 1.13
*/
protected GetQueryResults(java.lang.String projectId, java.lang.String jobId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.GetQueryResultsResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.jobId = com.google.api.client.util.Preconditions.checkNotNull(jobId, "Required parameter jobId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetQueryResults setAlt(java.lang.String alt) {
return (GetQueryResults) super.setAlt(alt);
}
@Override
public GetQueryResults setFields(java.lang.String fields) {
return (GetQueryResults) super.setFields(fields);
}
@Override
public GetQueryResults setKey(java.lang.String key) {
return (GetQueryResults) super.setKey(key);
}
@Override
public GetQueryResults setOauthToken(java.lang.String oauthToken) {
return (GetQueryResults) super.setOauthToken(oauthToken);
}
@Override
public GetQueryResults setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetQueryResults) super.setPrettyPrint(prettyPrint);
}
@Override
public GetQueryResults setQuotaUser(java.lang.String quotaUser) {
return (GetQueryResults) super.setQuotaUser(quotaUser);
}
@Override
public GetQueryResults setUserIp(java.lang.String userIp) {
return (GetQueryResults) super.setUserIp(userIp);
}
/** [Required] Project ID of the query job */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** [Required] Project ID of the query job
*/
public java.lang.String getProjectId() {
return projectId;
}
/** [Required] Project ID of the query job */
public GetQueryResults setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** [Required] Job ID of the query job */
@com.google.api.client.util.Key
private java.lang.String jobId;
/** [Required] Job ID of the query job
*/
public java.lang.String getJobId() {
return jobId;
}
/** [Required] Job ID of the query job */
public GetQueryResults setJobId(java.lang.String jobId) {
this.jobId = jobId;
return this;
}
/**
* The geographic location where the job should run. Required except for US and EU. See
* details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
@com.google.api.client.util.Key
private java.lang.String location;
/** The geographic location where the job should run. Required except for US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public java.lang.String getLocation() {
return location;
}
/**
* The geographic location where the job should run. Required except for US and EU. See
* details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
*/
public GetQueryResults setLocation(java.lang.String location) {
this.location = location;
return this;
}
/** Maximum number of results to read */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of results to read
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of results to read */
public GetQueryResults setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public GetQueryResults setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/** Zero-based index of the starting row */
@com.google.api.client.util.Key
private java.math.BigInteger startIndex;
/** Zero-based index of the starting row
*/
public java.math.BigInteger getStartIndex() {
return startIndex;
}
/** Zero-based index of the starting row */
public GetQueryResults setStartIndex(java.math.BigInteger startIndex) {
this.startIndex = startIndex;
return this;
}
/**
* How long to wait for the query to complete, in milliseconds, before returning. Default is
* 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the
* response will be false
*/
@com.google.api.client.util.Key
private java.lang.Long timeoutMs;
/** How long to wait for the query to complete, in milliseconds, before returning. Default is 10
seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response
will be false
*/
public java.lang.Long getTimeoutMs() {
return timeoutMs;
}
/**
* How long to wait for the query to complete, in milliseconds, before returning. Default is
* 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the
* response will be false
*/
public GetQueryResults setTimeoutMs(java.lang.Long timeoutMs) {
this.timeoutMs = timeoutMs;
return this;
}
@Override
public GetQueryResults set(String parameterName, Object value) {
return (GetQueryResults) super.set(parameterName, value);
}
}
/**
* Starts a new asynchronous job. Requires the Can View project role.
*
* Create a request for the method "jobs.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the project that will be billed for the job
* @param content the {@link com.google.api.services.bigquery.model.Job}
* @return the request
*/
public Insert insert(java.lang.String projectId, com.google.api.services.bigquery.model.Job content) throws java.io.IOException {
Insert result = new Insert(projectId, content);
initialize(result);
return result;
}
/**
* Starts a new asynchronous job. Requires the Can View project role.
*
* Create a request for the method "jobs.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* <p>
* This method should be used for uploading media content.
* </p>
*
* @param projectId Project ID of the project that will be billed for the job
* @param content the {@link com.google.api.services.bigquery.model.Job} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @return the request
* @throws java.io.IOException if the initialization of the request fails
*/
public Insert insert(java.lang.String projectId, com.google.api.services.bigquery.model.Job content, com.google.api.client.http.AbstractInputStreamContent mediaContent) throws java.io.IOException {
Insert result = new Insert(projectId, content, mediaContent);
initialize(result);
return result;
}
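// Illustrative usage sketch (paths and IDs are placeholders): starting a load job by
// uploading local data alongside the job configuration. FileContent is one possible
// AbstractInputStreamContent implementation; the Job configuration shown is minimal
// and assumed rather than taken from this file.
//
//   com.google.api.services.bigquery.model.Job job = new com.google.api.services.bigquery.model.Job()
//       .setConfiguration(new com.google.api.services.bigquery.model.JobConfiguration()
//           .setLoad(new com.google.api.services.bigquery.model.JobConfigurationLoad()
//               .setDestinationTable(new com.google.api.services.bigquery.model.TableReference()
//                   .setProjectId("my-project").setDatasetId("my_dataset").setTableId("my_table"))
//               .setSourceFormat("CSV")));
//   com.google.api.client.http.AbstractInputStreamContent media =
//       new com.google.api.client.http.FileContent("text/csv", new java.io.File("rows.csv"));
//   com.google.api.services.bigquery.model.Job started =
//       bigquery.jobs().insert("my-project", job, media).execute();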
public class Insert extends BigqueryRequest<com.google.api.services.bigquery.model.Job> {
private static final String REST_PATH = "projects/{projectId}/jobs";
/**
* Starts a new asynchronous job. Requires the Can View project role.
*
* Create a request for the method "jobs.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the project that will be billed for the job
* @param content the {@link com.google.api.services.bigquery.model.Job}
* @since 1.13
*/
protected Insert(java.lang.String projectId, com.google.api.services.bigquery.model.Job content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Job.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
/**
* Starts a new asynchronous job. Requires the Can View project role.
*
* Create a request for the method "jobs.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* <p>
* This constructor should be used for uploading media content.
* </p>
*
* @param projectId Project ID of the project that will be billed for the job
* @param content the {@link com.google.api.services.bigquery.model.Job} media metadata or {@code null} if none
* @param mediaContent The media HTTP content or {@code null} if none.
* @since 1.13
*/
protected Insert(java.lang.String projectId, com.google.api.services.bigquery.model.Job content, com.google.api.client.http.AbstractInputStreamContent mediaContent) {
super(Bigquery.this, "POST", "/upload/" + getServicePath() + REST_PATH, content, com.google.api.services.bigquery.model.Job.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
initializeMediaUpload(mediaContent);
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUserIp(java.lang.String userIp) {
return (Insert) super.setUserIp(userIp);
}
/** Project ID of the project that will be billed for the job */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the project that will be billed for the job
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the project that will be billed for the job */
public Insert setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all jobs that you started in the specified project. Job information is available for a
* six-month period after creation. The job list is sorted in reverse chronological order, by job
* creation time. Requires the Can View project role, or the Is Owner project role if you set the
* allUsers property.
*
* Create a request for the method "jobs.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
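*
* <p>A minimal paging sketch (illustrative only; it assumes an initialized {@code Bigquery} client,
* a placeholder project ID, and that {@link com.google.api.services.bigquery.model.JobList} exposes
* the usual {@code getJobs()}/{@code getNextPageToken()} accessors of this generated model):</p>
* <pre>
* {@code Bigquery.Jobs.List request = bigquery.jobs().list("my-project").setMaxResults(50L).setAllUsers(true);}
* {@code JobList firstPage = request.execute();}
* {@code JobList secondPage = request.setPageToken(firstPage.getNextPageToken()).execute();}
* </pre>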
*
* @param projectId Project ID of the jobs to list
* @return the request
*/
public List list(java.lang.String projectId) throws java.io.IOException {
List result = new List(projectId);
initialize(result);
return result;
}
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.JobList> {
private static final String REST_PATH = "projects/{projectId}/jobs";
/**
* Lists all jobs that you started in the specified project. Job information is available for a
* six-month period after creation. The job list is sorted in reverse chronological order, by job
* creation time. Requires the Can View project role, or the Is Owner project role if you set the
* allUsers property.
*
* Create a request for the method "jobs.list".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the jobs to list
* @since 1.13
*/
protected List(java.lang.String projectId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.JobList.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Project ID of the jobs to list */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the jobs to list
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the jobs to list */
public List setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Whether to display jobs owned by all users in the project. Default false */
@com.google.api.client.util.Key
private java.lang.Boolean allUsers;
/** Whether to display jobs owned by all users in the project. Default false
*/
public java.lang.Boolean getAllUsers() {
return allUsers;
}
/** Whether to display jobs owned by all users in the project. Default false */
public List setAllUsers(java.lang.Boolean allUsers) {
this.allUsers = allUsers;
return this;
}
/**
* Max value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs
* created before or at this timestamp are returned
*/
@com.google.api.client.util.Key
private java.math.BigInteger maxCreationTime;
/** Max value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created
before or at this timestamp are returned
*/
public java.math.BigInteger getMaxCreationTime() {
return maxCreationTime;
}
/**
* Max value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs
* created before or at this timestamp are returned
*/
public List setMaxCreationTime(java.math.BigInteger maxCreationTime) {
this.maxCreationTime = maxCreationTime;
return this;
}
/** Maximum number of results to return */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of results to return
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of results to return */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/**
* Min value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs
* created after or at this timestamp are returned
*/
@com.google.api.client.util.Key
private java.math.BigInteger minCreationTime;
/** Min value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs created
after or at this timestamp are returned
*/
public java.math.BigInteger getMinCreationTime() {
return minCreationTime;
}
/**
* Min value for job creation time, in milliseconds since the POSIX epoch. If set, only jobs
* created after or at this timestamp are returned
*/
public List setMinCreationTime(java.math.BigInteger minCreationTime) {
this.minCreationTime = minCreationTime;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/**
* If set, retrieves only jobs whose parent is this job. Otherwise, retrieves only jobs which
* have no parent
*/
@com.google.api.client.util.Key
private java.lang.String parentJobId;
/** If set, retrieves only jobs whose parent is this job. Otherwise, retrieves only jobs which have no
parent
*/
public java.lang.String getParentJobId() {
return parentJobId;
}
/**
* If set, retrieves only jobs whose parent is this job. Otherwise, retrieves only jobs which
* have no parent
*/
public List setParentJobId(java.lang.String parentJobId) {
this.parentJobId = parentJobId;
return this;
}
/** Restrict information returned to a set of selected fields */
@com.google.api.client.util.Key
private java.lang.String projection;
/** Restrict information returned to a set of selected fields
*/
public java.lang.String getProjection() {
return projection;
}
/** Restrict information returned to a set of selected fields */
public List setProjection(java.lang.String projection) {
this.projection = projection;
return this;
}
/** Filter for job state */
@com.google.api.client.util.Key
private java.util.List<java.lang.String> stateFilter;
/** Filter for job state
*/
public java.util.List<java.lang.String> getStateFilter() {
return stateFilter;
}
/** Filter for job state */
public List setStateFilter(java.util.List<java.lang.String> stateFilter) {
this.stateFilter = stateFilter;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Runs a BigQuery SQL query synchronously and returns query results if the query completes within a
* specified timeout.
*
* Create a request for the method "jobs.query".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Query#execute()} method to invoke the remote operation.
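*
* <p>A minimal synchronous-query sketch (illustrative only; it assumes an initialized
* {@code Bigquery} client, a placeholder project ID, and that
* {@link com.google.api.services.bigquery.model.QueryRequest} exposes a {@code setQuery(String)}
* setter as in other revisions of this generated model):</p>
* <pre>
* {@code QueryRequest request = new QueryRequest().setQuery("SELECT 17");}
* {@code QueryResponse response = bigquery.jobs().query("my-project", request).execute();}
* {@code // if the response's jobComplete field is false, poll jobs.getQueryResults for the returned job}
* </pre>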
*
* @param projectId Project ID of the project billed for the query
* @param content the {@link com.google.api.services.bigquery.model.QueryRequest}
* @return the request
*/
public Query query(java.lang.String projectId, com.google.api.services.bigquery.model.QueryRequest content) throws java.io.IOException {
Query result = new Query(projectId, content);
initialize(result);
return result;
}
public class Query extends BigqueryRequest<com.google.api.services.bigquery.model.QueryResponse> {
private static final String REST_PATH = "projects/{projectId}/queries";
/**
* Runs a BigQuery SQL query synchronously and returns query results if the query completes within
* a specified timeout.
*
* Create a request for the method "jobs.query".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Query#execute()} method to invoke the remote operation.
* <p> {@link
* Query#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the project billed for the query
* @param content the {@link com.google.api.services.bigquery.model.QueryRequest}
* @since 1.13
*/
protected Query(java.lang.String projectId, com.google.api.services.bigquery.model.QueryRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.QueryResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
@Override
public Query setAlt(java.lang.String alt) {
return (Query) super.setAlt(alt);
}
@Override
public Query setFields(java.lang.String fields) {
return (Query) super.setFields(fields);
}
@Override
public Query setKey(java.lang.String key) {
return (Query) super.setKey(key);
}
@Override
public Query setOauthToken(java.lang.String oauthToken) {
return (Query) super.setOauthToken(oauthToken);
}
@Override
public Query setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Query) super.setPrettyPrint(prettyPrint);
}
@Override
public Query setQuotaUser(java.lang.String quotaUser) {
return (Query) super.setQuotaUser(quotaUser);
}
@Override
public Query setUserIp(java.lang.String userIp) {
return (Query) super.setUserIp(userIp);
}
/** Project ID of the project billed for the query */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the project billed for the query
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the project billed for the query */
public Query setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
@Override
public Query set(String parameterName, Object value) {
return (Query) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Models collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Models.List request = bigquery.models().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Models models() {
return new Models();
}
/**
* The "models" collection of methods.
*/
public class Models {
/**
* Deletes the model specified by modelId from the dataset.
*
* Create a request for the method "models.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
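*
* <p>A minimal usage sketch (illustrative only; the project, dataset, and model IDs are
* placeholders and the call assumes an initialized {@code Bigquery} client named
* {@code bigquery}):</p>
* <pre>
* {@code bigquery.models().delete("my-project", "my_dataset", "my_model").execute();}
* </pre>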
*
* @param projectId Required. Project ID of the model to delete.
* @param datasetId Required. Dataset ID of the model to delete.
* @param modelId Required. Model ID of the model to delete.
* @return the request
*/
public Delete delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId) throws java.io.IOException {
Delete result = new Delete(projectId, datasetId, modelId);
initialize(result);
return result;
}
public class Delete extends BigqueryRequest<Void> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern MODEL_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Deletes the model specified by modelId from the dataset.
*
* Create a request for the method "models.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the model to delete.
* @param datasetId Required. Dataset ID of the model to delete.
* @param modelId Required. Model ID of the model to delete.
* @since 1.13
*/
protected Delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId) {
super(Bigquery.this, "DELETE", REST_PATH, null, Void.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = com.google.api.client.util.Preconditions.checkNotNull(modelId, "Required parameter modelId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUserIp(java.lang.String userIp) {
return (Delete) super.setUserIp(userIp);
}
/** Required. Project ID of the model to delete. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the model to delete.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the model to delete. */
public Delete setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the model to delete. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the model to delete.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the model to delete. */
public Delete setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Model ID of the model to delete. */
@com.google.api.client.util.Key
private java.lang.String modelId;
/** Required. Model ID of the model to delete.
*/
public java.lang.String getModelId() {
return modelId;
}
/** Required. Model ID of the model to delete. */
public Delete setModelId(java.lang.String modelId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = modelId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the specified model resource by model ID.
*
* Create a request for the method "models.get".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the requested model.
* @param datasetId Required. Dataset ID of the requested model.
* @param modelId Required. Model ID of the requested model.
* @return the request
*/
public Get get(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId) throws java.io.IOException {
Get result = new Get(projectId, datasetId, modelId);
initialize(result);
return result;
}
public class Get extends BigqueryRequest<com.google.api.services.bigquery.model.Model> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern MODEL_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets the specified model resource by model ID.
*
* Create a request for the method "models.get".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the requested model.
* @param datasetId Required. Dataset ID of the requested model.
* @param modelId Required. Model ID of the requested model.
* @since 1.13
*/
protected Get(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.Model.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = com.google.api.client.util.Preconditions.checkNotNull(modelId, "Required parameter modelId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUserIp(java.lang.String userIp) {
return (Get) super.setUserIp(userIp);
}
/** Required. Project ID of the requested model. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the requested model.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the requested model. */
public Get setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the requested model. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the requested model.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the requested model. */
public Get setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Model ID of the requested model. */
@com.google.api.client.util.Key
private java.lang.String modelId;
/** Required. Model ID of the requested model.
*/
public java.lang.String getModelId() {
return modelId;
}
/** Required. Model ID of the requested model. */
public Get setModelId(java.lang.String modelId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = modelId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists all models in the specified dataset. Requires the READER dataset role. After retrieving the
* list of models, you can get information about a particular model by calling the models.get
* method.
*
* Create a request for the method "models.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
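*
* <p>A minimal paging sketch (illustrative only; identifiers are placeholders and it assumes
* {@link com.google.api.services.bigquery.model.ListModelsResponse} exposes the usual
* {@code getModels()}/{@code getNextPageToken()} accessors of this generated model):</p>
* <pre>
* {@code Bigquery.Models.List request = bigquery.models().list("my-project", "my_dataset").setMaxResults(100L);}
* {@code ListModelsResponse firstPage = request.execute();}
* {@code ListModelsResponse nextPage = request.setPageToken(firstPage.getNextPageToken()).execute();}
* </pre>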
*
* @param projectId Required. Project ID of the models to list.
* @param datasetId Required. Dataset ID of the models to list.
* @return the request
*/
public List list(java.lang.String projectId, java.lang.String datasetId) throws java.io.IOException {
List result = new List(projectId, datasetId);
initialize(result);
return result;
}
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.ListModelsResponse> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/models";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists all models in the specified dataset. Requires the READER dataset role. After retrieving
* the list of models, you can get information about a particular model by calling the models.get
* method.
*
* Create a request for the method "models.list".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the models to list.
* @param datasetId Required. Dataset ID of the models to list.
* @since 1.13
*/
protected List(java.lang.String projectId, java.lang.String datasetId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.ListModelsResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Required. Project ID of the models to list. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the models to list.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the models to list. */
public List setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the models to list. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the models to list.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the models to list. */
public List setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** The maximum number of results to return in a single response page. Leverage the page tokens to
iterate through the entire collection.
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Patch specific fields in the specified model.
*
* Create a request for the method "models.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
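*
* <p>A minimal patch sketch (illustrative only; identifiers are placeholders and it assumes the
* {@link com.google.api.services.bigquery.model.Model} resource exposes a
* {@code setDescription(String)} setter, with unset fields left untouched by PATCH semantics):</p>
* <pre>
* {@code Model update = new Model().setDescription("churn model, retrained weekly");}
* {@code Model patched = bigquery.models().patch("my-project", "my_dataset", "my_model", update).execute();}
* </pre>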
*
* @param projectId Required. Project ID of the model to patch.
* @param datasetId Required. Dataset ID of the model to patch.
* @param modelId Required. Model ID of the model to patch.
* @param content the {@link com.google.api.services.bigquery.model.Model}
* @return the request
*/
public Patch patch(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId, com.google.api.services.bigquery.model.Model content) throws java.io.IOException {
Patch result = new Patch(projectId, datasetId, modelId, content);
initialize(result);
return result;
}
public class Patch extends BigqueryRequest<com.google.api.services.bigquery.model.Model> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/models/{+modelId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern MODEL_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Patch specific fields in the specified model.
*
* Create a request for the method "models.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the model to patch.
* @param datasetId Required. Dataset ID of the model to patch.
* @param modelId Required. Model ID of the model to patch.
* @param content the {@link com.google.api.services.bigquery.model.Model}
* @since 1.13
*/
protected Patch(java.lang.String projectId, java.lang.String datasetId, java.lang.String modelId, com.google.api.services.bigquery.model.Model content) {
super(Bigquery.this, "PATCH", REST_PATH, content, com.google.api.services.bigquery.model.Model.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = com.google.api.client.util.Preconditions.checkNotNull(modelId, "Required parameter modelId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUserIp(java.lang.String userIp) {
return (Patch) super.setUserIp(userIp);
}
/** Required. Project ID of the model to patch. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the model to patch.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the model to patch. */
public Patch setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the model to patch. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the model to patch.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the model to patch. */
public Patch setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Model ID of the model to patch. */
@com.google.api.client.util.Key
private java.lang.String modelId;
/** Required. Model ID of the model to patch.
*/
public java.lang.String getModelId() {
return modelId;
}
/** Required. Model ID of the model to patch. */
public Patch setModelId(java.lang.String modelId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(MODEL_ID_PATTERN.matcher(modelId).matches(),
"Parameter modelId must conform to the pattern " +
"^[^/]+$");
}
this.modelId = modelId;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Projects.List request = bigquery.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* Returns the email address of the service account for your project used for interactions with
* Google Cloud KMS.
*
* Create a request for the method "projects.getServiceAccount".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link GetServiceAccount#execute()} method to invoke the remote operation.
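*
* <p>A minimal usage sketch (illustrative only; the project ID is a placeholder and it assumes
* {@link com.google.api.services.bigquery.model.GetServiceAccountResponse} exposes a
* {@code getEmail()} accessor as in other revisions of this generated model):</p>
* <pre>
* {@code GetServiceAccountResponse account = bigquery.projects().getServiceAccount("my-project").execute();}
* {@code String email = account.getEmail();}
* </pre>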
*
* @param projectId Project ID for which the service account is requested.
* @return the request
*/
public GetServiceAccount getServiceAccount(java.lang.String projectId) throws java.io.IOException {
GetServiceAccount result = new GetServiceAccount(projectId);
initialize(result);
return result;
}
public class GetServiceAccount extends BigqueryRequest<com.google.api.services.bigquery.model.GetServiceAccountResponse> {
private static final String REST_PATH = "projects/{projectId}/serviceAccount";
/**
* Returns the email address of the service account for your project used for interactions with
* Google Cloud KMS.
*
* Create a request for the method "projects.getServiceAccount".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link GetServiceAccount#execute()} method to invoke the remote
* operation. <p> {@link
* GetServiceAccount#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param projectId Project ID for which the service account is requested.
* @since 1.13
*/
protected GetServiceAccount(java.lang.String projectId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.GetServiceAccountResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetServiceAccount setAlt(java.lang.String alt) {
return (GetServiceAccount) super.setAlt(alt);
}
@Override
public GetServiceAccount setFields(java.lang.String fields) {
return (GetServiceAccount) super.setFields(fields);
}
@Override
public GetServiceAccount setKey(java.lang.String key) {
return (GetServiceAccount) super.setKey(key);
}
@Override
public GetServiceAccount setOauthToken(java.lang.String oauthToken) {
return (GetServiceAccount) super.setOauthToken(oauthToken);
}
@Override
public GetServiceAccount setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetServiceAccount) super.setPrettyPrint(prettyPrint);
}
@Override
public GetServiceAccount setQuotaUser(java.lang.String quotaUser) {
return (GetServiceAccount) super.setQuotaUser(quotaUser);
}
@Override
public GetServiceAccount setUserIp(java.lang.String userIp) {
return (GetServiceAccount) super.setUserIp(userIp);
}
/** Project ID for which the service account is requested. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID for which the service account is requested.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID for which the service account is requested. */
public GetServiceAccount setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
@Override
public GetServiceAccount set(String parameterName, Object value) {
return (GetServiceAccount) super.set(parameterName, value);
}
}
/**
* Lists all projects to which you have been granted any project role.
*
* Create a request for the method "projects.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
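*
* <p>A minimal usage sketch (illustrative only; it assumes an initialized {@code Bigquery} client
* and that {@link com.google.api.services.bigquery.model.ProjectList} exposes the usual
* {@code getProjects()}/{@code getNextPageToken()} accessors of this generated model):</p>
* <pre>
* {@code ProjectList projects = bigquery.projects().list().setMaxResults(100L).execute();}
* </pre>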
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.ProjectList> {
private static final String REST_PATH = "projects";
/**
* Lists all projects to which you have been granted any project role.
*
* Create a request for the method "projects.list".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.ProjectList.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Maximum number of results to return */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of results to return
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of results to return */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Routines collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Routines.List request = bigquery.routines().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Routines routines() {
return new Routines();
}
/**
* The "routines" collection of methods.
*/
public class Routines {
/**
* Deletes the routine specified by routineId from the dataset.
*
* Create a request for the method "routines.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the routine to delete
* @param datasetId Required. Dataset ID of the routine to delete
* @param routineId Required. Routine ID of the routine to delete
* @return the request
*/
public Delete delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId) throws java.io.IOException {
Delete result = new Delete(projectId, datasetId, routineId);
initialize(result);
return result;
}
public class Delete extends BigqueryRequest<Void> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern ROUTINE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Deletes the routine specified by routineId from the dataset.
*
* Create a request for the method "routines.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the routine to delete
* @param datasetId Required. Dataset ID of the routine to delete
* @param routineId Required. Routine ID of the routine to delete
* @since 1.13
*/
protected Delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId) {
super(Bigquery.this, "DELETE", REST_PATH, null, Void.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = com.google.api.client.util.Preconditions.checkNotNull(routineId, "Required parameter routineId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUserIp(java.lang.String userIp) {
return (Delete) super.setUserIp(userIp);
}
/** Required. Project ID of the routine to delete */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the routine to delete
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the routine to delete */
public Delete setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the routine to delete */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the routine to delete
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the routine to delete */
public Delete setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Routine ID of the routine to delete */
@com.google.api.client.util.Key
private java.lang.String routineId;
/** Required. Routine ID of the routine to delete
*/
public java.lang.String getRoutineId() {
return routineId;
}
/** Required. Routine ID of the routine to delete */
public Delete setRoutineId(java.lang.String routineId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = routineId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the specified routine resource by routine ID.
*
* Create a request for the method "routines.get".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
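*
* <p>A minimal usage sketch (illustrative only; identifiers are placeholders, and the read mask
* field names are assumed to follow the Routine resource's JSON field naming):</p>
* <pre>
* {@code Routine routine = bigquery.routines().get("my-project", "my_dataset", "my_routine")}
* {@code     .setReadMask("routineType,definitionBody")}
* {@code     .execute();}
* </pre>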
*
* @param projectId Required. Project ID of the requested routine
* @param datasetId Required. Dataset ID of the requested routine
* @param routineId Required. Routine ID of the requested routine
* @return the request
*/
public Get get(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId) throws java.io.IOException {
Get result = new Get(projectId, datasetId, routineId);
initialize(result);
return result;
}
public class Get extends BigqueryRequest<com.google.api.services.bigquery.model.Routine> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern ROUTINE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets the specified routine resource by routine ID.
*
* Create a request for the method "routines.get".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the requested routine
* @param datasetId Required. Dataset ID of the requested routine
* @param routineId Required. Routine ID of the requested routine
* @since 1.13
*/
protected Get(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.Routine.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = com.google.api.client.util.Preconditions.checkNotNull(routineId, "Required parameter routineId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUserIp(java.lang.String userIp) {
return (Get) super.setUserIp(userIp);
}
/** Required. Project ID of the requested routine */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the requested routine
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the requested routine */
public Get setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the requested routine */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the requested routine
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the requested routine */
public Get setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Routine ID of the requested routine */
@com.google.api.client.util.Key
private java.lang.String routineId;
/** Required. Routine ID of the requested routine
*/
public java.lang.String getRoutineId() {
return routineId;
}
/** Required. Routine ID of the requested routine */
public Get setRoutineId(java.lang.String routineId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = routineId;
return this;
}
/**
* If set, only the Routine fields in the field mask are returned in the response. If unset,
* all Routine fields are returned.
*/
@com.google.api.client.util.Key
private String readMask;
/** If set, only the Routine fields in the field mask are returned in the response. If unset, all
Routine fields are returned.
*/
public String getReadMask() {
return readMask;
}
/**
* If set, only the Routine fields in the field mask are returned in the response. If unset,
* all Routine fields are returned.
*/
public Get setReadMask(String readMask) {
this.readMask = readMask;
return this;
}
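          // A minimal usage sketch (not generated code): requesting only selected Routine fields via
          // the read mask. The client variable, identifiers, and the mask value are illustrative
          // assumptions; consult the Routine resource documentation for valid field paths.
          //
          //   Routine routine = bigquery.routines()
          //       .get("my-project", "my_dataset", "my_routine")
          //       .setReadMask("routineType,definitionBody")
          //       .execute();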
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Creates a new routine in the dataset.
*
* Create a request for the method "routines.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the new routine
* @param datasetId Required. Dataset ID of the new routine
* @param content the {@link com.google.api.services.bigquery.model.Routine}
* @return the request
*/
public Insert insert(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Routine content) throws java.io.IOException {
Insert result = new Insert(projectId, datasetId, content);
initialize(result);
return result;
}
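      // A minimal usage sketch (illustrative, not generated code). Assumes a configured Bigquery
      // client named "bigquery" and a populated com.google.api.services.bigquery.model.Routine
      // named "routine"; project and dataset IDs are placeholders.
      //
      //   Routine created = bigquery.routines()
      //       .insert("my-project", "my_dataset", routine)
      //       .execute();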
public class Insert extends BigqueryRequest<com.google.api.services.bigquery.model.Routine> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/routines";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Creates a new routine in the dataset.
*
* Create a request for the method "routines.insert".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the new routine
* @param datasetId Required. Dataset ID of the new routine
* @param content the {@link com.google.api.services.bigquery.model.Routine}
* @since 1.13
*/
protected Insert(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Routine content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Routine.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUserIp(java.lang.String userIp) {
return (Insert) super.setUserIp(userIp);
}
/** Required. Project ID of the new routine */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the new routine
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the new routine */
public Insert setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the new routine */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the new routine
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the new routine */
public Insert setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all routines in the specified dataset. Requires the READER dataset role.
*
* Create a request for the method "routines.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the routines to list
* @param datasetId Required. Dataset ID of the routines to list
* @return the request
*/
public List list(java.lang.String projectId, java.lang.String datasetId) throws java.io.IOException {
List result = new List(projectId, datasetId);
initialize(result);
return result;
}
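      // A minimal usage sketch (illustrative, not generated code): paging through every routine in
      // a dataset. Identifiers are placeholders, and it assumes ListRoutinesResponse exposes
      // getRoutines() and getNextPageToken().
      //
      //   String pageToken = null;
      //   do {
      //     ListRoutinesResponse page = bigquery.routines()
      //         .list("my-project", "my_dataset")
      //         .setMaxResults(100L)
      //         .setPageToken(pageToken)
      //         .execute();
      //     // ... consume page.getRoutines() ...
      //     pageToken = page.getNextPageToken();
      //   } while (pageToken != null);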
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.ListRoutinesResponse> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/routines";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists all routines in the specified dataset. Requires the READER dataset role.
*
* Create a request for the method "routines.list".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the routines to list
* @param datasetId Required. Dataset ID of the routines to list
* @since 1.13
*/
protected List(java.lang.String projectId, java.lang.String datasetId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.ListRoutinesResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Required. Project ID of the routines to list */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the routines to list
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the routines to list */
public List setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the routines to list */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the routines to list
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the routines to list */
public List setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/**
* If set, then only the Routines matching this filter are returned. The current supported
             * form is either "routine_type:" or "routineType:" followed by a RoutineType enum value. Example:
* "routineType:SCALAR_FUNCTION".
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** If set, then only the Routines matching this filter are returned. The current supported form is
either "routine_type:" or "routineType:", where is a RoutineType enum. Example:
"routineType:SCALAR_FUNCTION".
*/
public java.lang.String getFilter() {
return filter;
}
/**
* If set, then only the Routines matching this filter are returned. The current supported
             * form is either "routine_type:" or "routineType:" followed by a RoutineType enum value. Example:
* "routineType:SCALAR_FUNCTION".
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** The maximum number of results to return in a single response page. Leverage the page tokens to
iterate through the entire collection.
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/**
* If set, then only the Routine fields in the field mask, as well as project_id, dataset_id
* and routine_id, are returned in the response. If unset, then the following Routine fields
* are returned: etag, project_id, dataset_id, routine_id, routine_type, creation_time,
* last_modified_time, and language.
*/
@com.google.api.client.util.Key
private String readMask;
/** If set, then only the Routine fields in the field mask, as well as project_id, dataset_id and
routine_id, are returned in the response. If unset, then the following Routine fields are returned:
etag, project_id, dataset_id, routine_id, routine_type, creation_time, last_modified_time, and
language.
*/
public String getReadMask() {
return readMask;
}
/**
* If set, then only the Routine fields in the field mask, as well as project_id, dataset_id
* and routine_id, are returned in the response. If unset, then the following Routine fields
* are returned: etag, project_id, dataset_id, routine_id, routine_type, creation_time,
* last_modified_time, and language.
*/
public List setReadMask(String readMask) {
this.readMask = readMask;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates information in an existing routine. The update method replaces the entire Routine
* resource.
*
* Create a request for the method "routines.update".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the routine to update
* @param datasetId Required. Dataset ID of the routine to update
* @param routineId Required. Routine ID of the routine to update
* @param content the {@link com.google.api.services.bigquery.model.Routine}
* @return the request
*/
public Update update(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId, com.google.api.services.bigquery.model.Routine content) throws java.io.IOException {
Update result = new Update(projectId, datasetId, routineId, content);
initialize(result);
return result;
}
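      // A minimal usage sketch (illustrative, not generated code): because update replaces the whole
      // Routine resource, the usual pattern is read-modify-write. Identifiers are placeholders and
      // the setDescription call assumes the Routine model exposes that setter.
      //
      //   Routine existing = bigquery.routines()
      //       .get("my-project", "my_dataset", "my_routine")
      //       .execute();
      //   existing.setDescription("Updated description");
      //   Routine updated = bigquery.routines()
      //       .update("my-project", "my_dataset", "my_routine", existing)
      //       .execute();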
public class Update extends BigqueryRequest<com.google.api.services.bigquery.model.Routine> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern ROUTINE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Updates information in an existing routine. The update method replaces the entire Routine
* resource.
*
* Create a request for the method "routines.update".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the routine to update
* @param datasetId Required. Dataset ID of the routine to update
* @param routineId Required. Routine ID of the routine to update
* @param content the {@link com.google.api.services.bigquery.model.Routine}
* @since 1.13
*/
protected Update(java.lang.String projectId, java.lang.String datasetId, java.lang.String routineId, com.google.api.services.bigquery.model.Routine content) {
super(Bigquery.this, "PUT", REST_PATH, content, com.google.api.services.bigquery.model.Routine.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = com.google.api.client.util.Preconditions.checkNotNull(routineId, "Required parameter routineId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUserIp(java.lang.String userIp) {
return (Update) super.setUserIp(userIp);
}
/** Required. Project ID of the routine to update */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the routine to update
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the routine to update */
public Update setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of the routine to update */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of the routine to update
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of the routine to update */
public Update setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Routine ID of the routine to update */
@com.google.api.client.util.Key
private java.lang.String routineId;
/** Required. Routine ID of the routine to update
*/
public java.lang.String getRoutineId() {
return routineId;
}
/** Required. Routine ID of the routine to update */
public Update setRoutineId(java.lang.String routineId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ROUTINE_ID_PATTERN.matcher(routineId).matches(),
"Parameter routineId must conform to the pattern " +
"^[^/]+$");
}
this.routineId = routineId;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the RowAccessPolicies collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.RowAccessPolicies.List request = bigquery.rowAccessPolicies().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public RowAccessPolicies rowAccessPolicies() {
return new RowAccessPolicies();
}
/**
* The "rowAccessPolicies" collection of methods.
*/
public class RowAccessPolicies {
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists and
* does not have a policy set.
*
* Create a request for the method "rowAccessPolicies.getIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.GetIamPolicyRequest}
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.GetIamPolicyRequest content) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource, content);
initialize(result);
return result;
}
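      // A minimal usage sketch (illustrative, not generated code): reading the IAM policy of a row
      // access policy. The resource path below is a placeholder following the pattern documented above.
      //
      //   Policy policy = bigquery.rowAccessPolicies()
      //       .getIamPolicy(
      //           "projects/my-project/datasets/my_dataset/tables/my_table/rowAccessPolicies/my_policy",
      //           new GetIamPolicyRequest())
      //       .execute();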
public class GetIamPolicy extends BigqueryRequest<com.google.api.services.bigquery.model.Policy> {
private static final String REST_PATH = "{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists
* and does not have a policy set.
*
* Create a request for the method "rowAccessPolicies.getIamPolicy".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.GetIamPolicyRequest}
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.GetIamPolicyRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUserIp(java.lang.String userIp) {
return (GetIamPolicy) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
/**
* Lists all row access policies on the specified table.
*
* Create a request for the method "rowAccessPolicies.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param projectId Required. Project ID of the row access policies to list.
* @param datasetId Required. Dataset ID of row access policies to list.
* @param tableId Required. Table ID of the table to list row access policies.
* @return the request
*/
public List list(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) throws java.io.IOException {
List result = new List(projectId, datasetId, tableId);
initialize(result);
return result;
}
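      // A minimal usage sketch (illustrative, not generated code); identifiers are placeholders.
      //
      //   ListRowAccessPoliciesResponse policies = bigquery.rowAccessPolicies()
      //       .list("my-project", "my_dataset", "my_table")
      //       .setPageSize(50)
      //       .execute();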
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.ListRowAccessPoliciesResponse> {
private static final String REST_PATH = "projects/{+projectId}/datasets/{+datasetId}/tables/{+tableId}/rowAccessPolicies";
private final java.util.regex.Pattern PROJECT_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern DATASET_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TABLE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists all row access policies on the specified table.
*
* Create a request for the method "rowAccessPolicies.list".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Required. Project ID of the row access policies to list.
* @param datasetId Required. Dataset ID of row access policies to list.
* @param tableId Required. Table ID of the table to list row access policies.
* @since 1.13
*/
protected List(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.ListRowAccessPoliciesResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TABLE_ID_PATTERN.matcher(tableId).matches(),
"Parameter tableId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Required. Project ID of the row access policies to list. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Required. Project ID of the row access policies to list.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Required. Project ID of the row access policies to list. */
public List setProjectId(java.lang.String projectId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_ID_PATTERN.matcher(projectId).matches(),
"Parameter projectId must conform to the pattern " +
"^[^/]+$");
}
this.projectId = projectId;
return this;
}
/** Required. Dataset ID of row access policies to list. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Required. Dataset ID of row access policies to list.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Required. Dataset ID of row access policies to list. */
public List setDatasetId(java.lang.String datasetId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(DATASET_ID_PATTERN.matcher(datasetId).matches(),
"Parameter datasetId must conform to the pattern " +
"^[^/]+$");
}
this.datasetId = datasetId;
return this;
}
/** Required. Table ID of the table to list row access policies. */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Required. Table ID of the table to list row access policies.
*/
public java.lang.String getTableId() {
return tableId;
}
/** Required. Table ID of the table to list row access policies. */
public List setTableId(java.lang.String tableId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TABLE_ID_PATTERN.matcher(tableId).matches(),
"Parameter tableId must conform to the pattern " +
"^[^/]+$");
}
this.tableId = tableId;
return this;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of results to return in a single response page. Leverage the page tokens to
iterate through the entire collection.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* The maximum number of results to return in a single response page. Leverage the page tokens
* to iterate through the entire collection.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** Page token, returned by a previous call, to request the next page of results. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "rowAccessPolicies.setIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
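      // A minimal usage sketch (illustrative, not generated code): since setIamPolicy replaces any
      // existing policy, a common pattern is to fetch the current policy, adjust its bindings, and
      // write it back. "resourcePath" is a placeholder string.
      //
      //   Policy current = bigquery.rowAccessPolicies()
      //       .getIamPolicy(resourcePath, new GetIamPolicyRequest())
      //       .execute();
      //   // ... adjust current.getBindings() as needed ...
      //   Policy updated = bigquery.rowAccessPolicies()
      //       .setIamPolicy(resourcePath, new SetIamPolicyRequest().setPolicy(current))
      //       .execute();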
public class SetIamPolicy extends BigqueryRequest<com.google.api.services.bigquery.model.Policy> {
private static final String REST_PATH = "{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "rowAccessPolicies.setIamPolicy".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.SetIamPolicyRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUserIp(java.lang.String userIp) {
return (SetIamPolicy) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
/**
* Returns permissions that a caller has on the specified resource. If the resource does not exist,
* this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is
* designed to be used for building permission-aware UIs and command-line tools, not for
* authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "rowAccessPolicies.testIamPermissions".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.bigquery.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
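      // A minimal usage sketch (illustrative, not generated code): asking which of a set of
      // permissions the caller holds. The permission string below is an assumed example, not a
      // verified permission name; "resourcePath" is a placeholder.
      //
      //   TestIamPermissionsResponse response = bigquery.rowAccessPolicies()
      //       .testIamPermissions(resourcePath, new TestIamPermissionsRequest()
      //           .setPermissions(java.util.Arrays.asList("bigquery.rowAccessPolicies.getIamPolicy")))
      //       .execute();
      //   // response.getPermissions() contains the subset that was actually granted.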
public class TestIamPermissions extends BigqueryRequest<com.google.api.services.bigquery.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
/**
* Returns permissions that a caller has on the specified resource. If the resource does not
* exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This
* operation is designed to be used for building permission-aware UIs and command-line tools, not
* for authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "rowAccessPolicies.testIamPermissions".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
       * operation. <p> {@link
       * TestIamPermissions#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.bigquery.model.TestIamPermissionsRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUserIp(java.lang.String userIp) {
return (TestIamPermissions) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See the operation
documentation for the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+/rowAccessPolicies/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Tabledata collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Tabledata.List request = bigquery.tabledata().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Tabledata tabledata() {
return new Tabledata();
}
/**
* The "tabledata" collection of methods.
*/
public class Tabledata {
/**
* Streams data into BigQuery one record at a time without needing to run a load job. Requires the
* WRITER dataset role.
*
* Create a request for the method "tabledata.insertAll".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link InsertAll#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the destination table.
* @param datasetId Dataset ID of the destination table.
* @param tableId Table ID of the destination table.
* @param content the {@link com.google.api.services.bigquery.model.TableDataInsertAllRequest}
* @return the request
*/
public InsertAll insertAll(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.TableDataInsertAllRequest content) throws java.io.IOException {
InsertAll result = new InsertAll(projectId, datasetId, tableId, content);
initialize(result);
return result;
}
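      // A minimal usage sketch (illustrative, not generated code): streaming a single row. Column
      // names and values are placeholders; it assumes the TableDataInsertAllRequest.Rows model with
      // its setJson(Map) setter.
      //
      //   TableDataInsertAllRequest.Rows row = new TableDataInsertAllRequest.Rows()
      //       .setJson(java.util.Collections.<String, Object>singletonMap("column_name", "value"));
      //   TableDataInsertAllResponse response = bigquery.tabledata()
      //       .insertAll("my-project", "my_dataset", "my_table",
      //           new TableDataInsertAllRequest().setRows(java.util.Collections.singletonList(row)))
      //       .execute();
      //   // A null or empty response.getInsertErrors() means every row was accepted.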
public class InsertAll extends BigqueryRequest<com.google.api.services.bigquery.model.TableDataInsertAllResponse> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll";
/**
* Streams data into BigQuery one record at a time without needing to run a load job. Requires the
* WRITER dataset role.
*
* Create a request for the method "tabledata.insertAll".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link InsertAll#execute()} method to invoke the remote
* operation. <p> {@link
* InsertAll#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the destination table.
* @param datasetId Dataset ID of the destination table.
* @param tableId Table ID of the destination table.
* @param content the {@link com.google.api.services.bigquery.model.TableDataInsertAllRequest}
* @since 1.13
*/
protected InsertAll(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.TableDataInsertAllRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.TableDataInsertAllResponse.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public InsertAll setAlt(java.lang.String alt) {
return (InsertAll) super.setAlt(alt);
}
@Override
public InsertAll setFields(java.lang.String fields) {
return (InsertAll) super.setFields(fields);
}
@Override
public InsertAll setKey(java.lang.String key) {
return (InsertAll) super.setKey(key);
}
@Override
public InsertAll setOauthToken(java.lang.String oauthToken) {
return (InsertAll) super.setOauthToken(oauthToken);
}
@Override
public InsertAll setPrettyPrint(java.lang.Boolean prettyPrint) {
return (InsertAll) super.setPrettyPrint(prettyPrint);
}
@Override
public InsertAll setQuotaUser(java.lang.String quotaUser) {
return (InsertAll) super.setQuotaUser(quotaUser);
}
@Override
public InsertAll setUserIp(java.lang.String userIp) {
return (InsertAll) super.setUserIp(userIp);
}
/** Project ID of the destination table. */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the destination table.
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the destination table. */
public InsertAll setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the destination table. */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the destination table.
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the destination table. */
public InsertAll setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the destination table. */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the destination table.
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the destination table. */
public InsertAll setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
@Override
public InsertAll set(String parameterName, Object value) {
return (InsertAll) super.set(parameterName, value);
}
}
/**
* Retrieves table data from a specified set of rows. Requires the READER dataset role.
*
* Create a request for the method "tabledata.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the table to read
* @param datasetId Dataset ID of the table to read
* @param tableId Table ID of the table to read
* @return the request
*/
public List list(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) throws java.io.IOException {
List result = new List(projectId, datasetId, tableId);
initialize(result);
return result;
}
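      // A minimal usage sketch (illustrative, not generated code): reading a window of rows starting
      // at row 0. Identifiers are placeholders.
      //
      //   TableDataList data = bigquery.tabledata()
      //       .list("my-project", "my_dataset", "my_table")
      //       .setStartIndex(java.math.BigInteger.ZERO)
      //       .setMaxResults(1000L)
      //       .execute();
      //   // data.getRows() holds the rows; feed data.getPageToken() back via setPageToken(...) to
      //   // continue reading.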
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.TableDataList> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data";
/**
* Retrieves table data from a specified set of rows. Requires the READER dataset role.
*
* Create a request for the method "tabledata.list".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the table to read
* @param datasetId Dataset ID of the table to read
* @param tableId Table ID of the table to read
* @since 1.13
*/
protected List(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.TableDataList.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Project ID of the table to read */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the table to read
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the table to read */
public List setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the table to read */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the table to read
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the table to read */
public List setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the table to read */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the table to read
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the table to read */
public List setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
/** Maximum number of results to return */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of results to return
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of results to return */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, identifying the result set */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, identifying the result set
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, identifying the result set */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/** List of fields to return (comma-separated). If unspecified, all fields are returned */
@com.google.api.client.util.Key
private java.lang.String selectedFields;
/** List of fields to return (comma-separated). If unspecified, all fields are returned
*/
public java.lang.String getSelectedFields() {
return selectedFields;
}
/** List of fields to return (comma-separated). If unspecified, all fields are returned */
public List setSelectedFields(java.lang.String selectedFields) {
this.selectedFields = selectedFields;
return this;
}
/** Zero-based index of the starting row to read */
@com.google.api.client.util.Key
private java.math.BigInteger startIndex;
/** Zero-based index of the starting row to read
*/
public java.math.BigInteger getStartIndex() {
return startIndex;
}
/** Zero-based index of the starting row to read */
public List setStartIndex(java.math.BigInteger startIndex) {
this.startIndex = startIndex;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Tables collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Bigquery bigquery = new Bigquery(...);}
* {@code Bigquery.Tables.List request = bigquery.tables().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Tables tables() {
return new Tables();
}
/**
* The "tables" collection of methods.
*/
public class Tables {
/**
* Deletes the table specified by tableId from the dataset. If the table contains data, all the data
* will be deleted.
*
* Create a request for the method "tables.delete".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the table to delete
* @param datasetId Dataset ID of the table to delete
* @param tableId Table ID of the table to delete
* @return the request
*/
public Delete delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) throws java.io.IOException {
Delete result = new Delete(projectId, datasetId, tableId);
initialize(result);
return result;
}
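      // A minimal usage sketch (illustrative, not generated code): tables.delete has no response
      // body, so execute() simply completes (or throws) once the table and its data are gone.
      //
      //   bigquery.tables()
      //       .delete("my-project", "my_dataset", "my_table")
      //       .execute();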
public class Delete extends BigqueryRequest<Void> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}";
/**
* Deletes the table specified by tableId from the dataset. If the table contains data, all the
* data will be deleted.
*
* Create a request for the method "tables.delete".
*
       * This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the table to delete
* @param datasetId Dataset ID of the table to delete
* @param tableId Table ID of the table to delete
* @since 1.13
*/
protected Delete(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) {
super(Bigquery.this, "DELETE", REST_PATH, null, Void.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUserIp(java.lang.String userIp) {
return (Delete) super.setUserIp(userIp);
}
/** Project ID of the table to delete */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the table to delete
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the table to delete */
public Delete setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the table to delete */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the table to delete
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the table to delete */
public Delete setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the table to delete */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the table to delete
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the table to delete */
public Delete setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the specified table resource by table ID. This method does not return the data in the table,
* it only returns the table resource, which describes the structure of this table.
*
* Create a request for the method "tables.get".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the requested table
* @param datasetId Dataset ID of the requested table
* @param tableId Table ID of the requested table
* @return the request
*/
public Get get(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) throws java.io.IOException {
Get result = new Get(projectId, datasetId, tableId);
initialize(result);
return result;
}
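// Illustrative usage sketch: fetching table metadata and restricting the returned fields.
// The IDs and the selectedFields value are placeholders, not values defined by this library.
//
//   com.google.api.services.bigquery.model.Table table =
//       bigquery.tables().get("my-project", "my_dataset", "my_table")
//           .setSelectedFields("schema,numRows")
//           .execute();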
public class Get extends BigqueryRequest<com.google.api.services.bigquery.model.Table> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}";
/**
* Gets the specified table resource by table ID. This method does not return the data in the
* table, it only returns the table resource, which describes the structure of this table.
*
* Create a request for the method "tables.get".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the requested table
* @param datasetId Dataset ID of the requested table
* @param tableId Table ID of the requested table
* @since 1.13
*/
protected Get(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.Table.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUserIp(java.lang.String userIp) {
return (Get) super.setUserIp(userIp);
}
/** Project ID of the requested table */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the requested table
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the requested table */
public Get setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the requested table */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the requested table
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the requested table */
public Get setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the requested table */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the requested table
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the requested table */
public Get setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
/** List of fields to return (comma-separated). If unspecified, all fields are returned */
@com.google.api.client.util.Key
private java.lang.String selectedFields;
/** List of fields to return (comma-separated). If unspecified, all fields are returned
*/
public java.lang.String getSelectedFields() {
return selectedFields;
}
/** List of fields to return (comma-separated). If unspecified, all fields are returned */
public Get setSelectedFields(java.lang.String selectedFields) {
this.selectedFields = selectedFields;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists and
* does not have a policy set.
*
* Create a request for the method "tables.getIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.GetIamPolicyRequest}
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.GetIamPolicyRequest content) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource, content);
initialize(result);
return result;
}
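// Illustrative usage sketch: the resource path must match
// "projects/{project}/datasets/{dataset}/tables/{table}"; the names below are placeholders.
//
//   com.google.api.services.bigquery.model.Policy policy =
//       bigquery.tables().getIamPolicy(
//           "projects/my-project/datasets/my_dataset/tables/my_table",
//           new com.google.api.services.bigquery.model.GetIamPolicyRequest())
//           .execute();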
public class GetIamPolicy extends BigqueryRequest<com.google.api.services.bigquery.model.Policy> {
private static final String REST_PATH = "{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists
* and does not have a policy set.
*
* Create a request for the method "tables.getIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.GetIamPolicyRequest}
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.GetIamPolicyRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUserIp(java.lang.String userIp) {
return (GetIamPolicy) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
/**
* Creates a new, empty table in the dataset.
*
* Create a request for the method "tables.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Insert#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the new table
* @param datasetId Dataset ID of the new table
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @return the request
*/
public Insert insert(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Table content) throws java.io.IOException {
Insert result = new Insert(projectId, datasetId, content);
initialize(result);
return result;
}
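// Illustrative usage sketch: assuming "tableBody" is a populated
// com.google.api.services.bigquery.model.Table describing the new table; the IDs are placeholders.
//
//   bigquery.tables().insert("my-project", "my_dataset", tableBody).execute();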
public class Insert extends BigqueryRequest<com.google.api.services.bigquery.model.Table> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables";
/**
* Creates a new, empty table in the dataset.
*
* Create a request for the method "tables.insert".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Insert#execute()} method to invoke the remote operation.
* <p> {@link
* Insert#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the new table
* @param datasetId Dataset ID of the new table
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @since 1.13
*/
protected Insert(java.lang.String projectId, java.lang.String datasetId, com.google.api.services.bigquery.model.Table content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Table.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public Insert setAlt(java.lang.String alt) {
return (Insert) super.setAlt(alt);
}
@Override
public Insert setFields(java.lang.String fields) {
return (Insert) super.setFields(fields);
}
@Override
public Insert setKey(java.lang.String key) {
return (Insert) super.setKey(key);
}
@Override
public Insert setOauthToken(java.lang.String oauthToken) {
return (Insert) super.setOauthToken(oauthToken);
}
@Override
public Insert setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Insert) super.setPrettyPrint(prettyPrint);
}
@Override
public Insert setQuotaUser(java.lang.String quotaUser) {
return (Insert) super.setQuotaUser(quotaUser);
}
@Override
public Insert setUserIp(java.lang.String userIp) {
return (Insert) super.setUserIp(userIp);
}
/** Project ID of the new table */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the new table
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the new table */
public Insert setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the new table */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the new table
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the new table */
public Insert setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
@Override
public Insert set(String parameterName, Object value) {
return (Insert) super.set(parameterName, value);
}
}
/**
* Lists all tables in the specified dataset. Requires the READER dataset role.
*
* Create a request for the method "tables.list".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the tables to list
* @param datasetId Dataset ID of the tables to list
* @return the request
*/
public List list(java.lang.String projectId, java.lang.String datasetId) throws java.io.IOException {
List result = new List(projectId, datasetId);
initialize(result);
return result;
}
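// Illustrative usage sketch: listing tables one page at a time. The IDs and maxResults value
// are placeholders; to fetch the next page, pass the page token returned in the previous
// TableList response to setPageToken.
//
//   com.google.api.services.bigquery.model.TableList page =
//       bigquery.tables().list("my-project", "my_dataset")
//           .setMaxResults(50L)
//           .execute();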
public class List extends BigqueryRequest<com.google.api.services.bigquery.model.TableList> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables";
/**
* Lists all tables in the specified dataset. Requires the READER dataset role.
*
* Create a request for the method "tables.list".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the tables to list
* @param datasetId Dataset ID of the tables to list
* @since 1.13
*/
protected List(java.lang.String projectId, java.lang.String datasetId) {
super(Bigquery.this, "GET", REST_PATH, null, com.google.api.services.bigquery.model.TableList.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUserIp(java.lang.String userIp) {
return (List) super.setUserIp(userIp);
}
/** Project ID of the tables to list */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the tables to list
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the tables to list */
public List setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the tables to list */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the tables to list
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the tables to list */
public List setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Maximum number of results to return */
@com.google.api.client.util.Key
private java.lang.Long maxResults;
/** Maximum number of results to return
*/
public java.lang.Long getMaxResults() {
return maxResults;
}
/** Maximum number of results to return */
public List setMaxResults(java.lang.Long maxResults) {
this.maxResults = maxResults;
return this;
}
/** Page token, returned by a previous call, to request the next page of results */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Page token, returned by a previous call, to request the next page of results
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** Page token, returned by a previous call, to request the next page of results */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates information in an existing table. The update method replaces the entire table resource,
* whereas the patch method only replaces fields that are provided in the submitted table resource.
* This method supports patch semantics.
*
* Create a request for the method "tables.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the table to update
* @param datasetId Dataset ID of the table to update
* @param tableId Table ID of the table to update
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @return the request
*/
public Patch patch(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.Table content) throws java.io.IOException {
Patch result = new Patch(projectId, datasetId, tableId, content);
initialize(result);
return result;
}
public class Patch extends BigqueryRequest<com.google.api.services.bigquery.model.Table> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}";
/**
* Updates information in an existing table. The update method replaces the entire table resource,
* whereas the patch method only replaces fields that are provided in the submitted table
* resource. This method supports patch semantics.
*
* Create a request for the method "tables.patch".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the table to update
* @param datasetId Dataset ID of the table to update
* @param tableId Table ID of the table to update
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @since 1.13
*/
protected Patch(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.Table content) {
super(Bigquery.this, "PATCH", REST_PATH, content, com.google.api.services.bigquery.model.Table.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUserIp(java.lang.String userIp) {
return (Patch) super.setUserIp(userIp);
}
/** Project ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the table to update
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the table to update */
public Patch setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the table to update
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the table to update */
public Patch setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the table to update
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the table to update */
public Patch setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "tables.setIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
public class SetIamPolicy extends BigqueryRequest<com.google.api.services.bigquery.model.Policy> {
private static final String REST_PATH = "{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "tables.setIamPolicy".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote
* operation. <p> {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.bigquery.model.SetIamPolicyRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUserIp(java.lang.String userIp) {
return (SetIamPolicy) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
/**
* Returns permissions that a caller has on the specified resource. If the resource does not exist,
* this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is
* designed to be used for building permission-aware UIs and command-line tools, not for
* authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "tables.testIamPermissions".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.bigquery.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
public class TestIamPermissions extends BigqueryRequest<com.google.api.services.bigquery.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
/**
* Returns permissions that a caller has on the specified resource. If the resource does not
* exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This
* operation is designed to be used for building permission-aware UIs and command-line tools, not
* for authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "tables.testIamPermissions".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
* operation. <p> {@link TestIamPermissions#initialize(com.google.api.client.googleapis.services.A
* bstractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.bigquery.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.bigquery.model.TestIamPermissionsRequest content) {
super(Bigquery.this, "POST", REST_PATH, content, com.google.api.services.bigquery.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUserIp(java.lang.String userIp) {
return (TestIamPermissions) super.setUserIp(userIp);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See the operation
documentation for the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/datasets/[^/]+/tables/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
/**
* Updates information in an existing table. The update method replaces the entire table resource,
* whereas the patch method only replaces fields that are provided in the submitted table resource.
*
* Create a request for the method "tables.update".
*
* This request holds the parameters needed by the bigquery server. After setting any optional
* parameters, call the {@link Update#execute()} method to invoke the remote operation.
*
* @param projectId Project ID of the table to update
* @param datasetId Dataset ID of the table to update
* @param tableId Table ID of the table to update
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @return the request
*/
public Update update(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.Table content) throws java.io.IOException {
Update result = new Update(projectId, datasetId, tableId, content);
initialize(result);
return result;
}
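// Illustrative usage sketch: update() replaces the whole table resource, so "tableBody" should
// carry every field that must survive; for partial changes, patch() (above) is the lighter call.
// The IDs and the tableBody variable are placeholders.
//
//   bigquery.tables().update("my-project", "my_dataset", "my_table", tableBody).execute();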
public class Update extends BigqueryRequest<com.google.api.services.bigquery.model.Table> {
private static final String REST_PATH = "projects/{projectId}/datasets/{datasetId}/tables/{tableId}";
/**
* Updates information in an existing table. The update method replaces the entire table resource,
* whereas the patch method only replaces fields that are provided in the submitted table
* resource.
*
* Create a request for the method "tables.update".
*
* This request holds the parameters needed by the bigquery server. After setting any
* optional parameters, call the {@link Update#execute()} method to invoke the remote operation.
* <p> {@link
* Update#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param projectId Project ID of the table to update
* @param datasetId Dataset ID of the table to update
* @param tableId Table ID of the table to update
* @param content the {@link com.google.api.services.bigquery.model.Table}
* @since 1.13
*/
protected Update(java.lang.String projectId, java.lang.String datasetId, java.lang.String tableId, com.google.api.services.bigquery.model.Table content) {
super(Bigquery.this, "PUT", REST_PATH, content, com.google.api.services.bigquery.model.Table.class);
this.projectId = com.google.api.client.util.Preconditions.checkNotNull(projectId, "Required parameter projectId must be specified.");
this.datasetId = com.google.api.client.util.Preconditions.checkNotNull(datasetId, "Required parameter datasetId must be specified.");
this.tableId = com.google.api.client.util.Preconditions.checkNotNull(tableId, "Required parameter tableId must be specified.");
}
@Override
public Update setAlt(java.lang.String alt) {
return (Update) super.setAlt(alt);
}
@Override
public Update setFields(java.lang.String fields) {
return (Update) super.setFields(fields);
}
@Override
public Update setKey(java.lang.String key) {
return (Update) super.setKey(key);
}
@Override
public Update setOauthToken(java.lang.String oauthToken) {
return (Update) super.setOauthToken(oauthToken);
}
@Override
public Update setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Update) super.setPrettyPrint(prettyPrint);
}
@Override
public Update setQuotaUser(java.lang.String quotaUser) {
return (Update) super.setQuotaUser(quotaUser);
}
@Override
public Update setUserIp(java.lang.String userIp) {
return (Update) super.setUserIp(userIp);
}
/** Project ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String projectId;
/** Project ID of the table to update
*/
public java.lang.String getProjectId() {
return projectId;
}
/** Project ID of the table to update */
public Update setProjectId(java.lang.String projectId) {
this.projectId = projectId;
return this;
}
/** Dataset ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String datasetId;
/** Dataset ID of the table to update
*/
public java.lang.String getDatasetId() {
return datasetId;
}
/** Dataset ID of the table to update */
public Update setDatasetId(java.lang.String datasetId) {
this.datasetId = datasetId;
return this;
}
/** Table ID of the table to update */
@com.google.api.client.util.Key
private java.lang.String tableId;
/** Table ID of the table to update
*/
public java.lang.String getTableId() {
return tableId;
}
/** Table ID of the table to update */
public Update setTableId(java.lang.String tableId) {
this.tableId = tableId;
return this;
}
@Override
public Update set(String parameterName, Object value) {
return (Update) super.set(parameterName, value);
}
}
}
/**
* Builder for {@link Bigquery}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
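// Illustrative construction sketch: with a transport and JSON factory obtained elsewhere
// (for example GoogleNetHttpTransport.newTrustedTransport() and a Jackson or GSON factory),
// a client could be built like this; the application name is a placeholder.
//
//   Bigquery bigquery = new Bigquery.Builder(transport, jsonFactory, null)
//       .setApplicationName("my-application/1.0")
//       .build();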
/** Builds a new instance of {@link Bigquery}. */
@Override
public Bigquery build() {
return new Bigquery(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link BigqueryRequestInitializer}.
*
* @since 1.12
*/
public Builder setBigqueryRequestInitializer(
BigqueryRequestInitializer bigqueryRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(bigqueryRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
| ["\"GOOGLE_API_USE_MTLS_ENDPOINT\""] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
pkg/skbn/kube.go
|
package skbn
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/Cognologix/skbn/pkg/utils"
core_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
)
// K8sClient holds a clientset and a config
type K8sClient struct {
ClientSet *kubernetes.Clientset
Config *rest.Config
}
type K8sFile struct {
name, eTag string
}
// GetClientToK8s returns a k8sClient
func GetClientToK8s() (*K8sClient, error) {
var kubeconfig string
if kubeConfigPath := os.Getenv("KUBECONFIG"); kubeConfigPath != "" {
kubeconfig = kubeConfigPath // CI process
} else {
kubeconfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") // Development environment
}
var config *rest.Config
_, err := os.Stat(kubeconfig)
if err != nil {
// In cluster configuration
config, err = rest.InClusterConfig()
if err != nil {
return nil, err
}
} else {
// Out of cluster configuration
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
var client = &K8sClient{ClientSet: clientset, Config: config}
return client, nil
}
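// Illustrative usage sketch (not part of the package API): paths follow the
// "<namespace>/<pod>/<container>/<path>" layout expected by initK8sVariables; all names
// below are placeholders and error handling is abbreviated.
//
//	client, err := GetClientToK8s()
//	if err != nil {
//		return err
//	}
//	files, err := GetListOfFilesFromK8s(client, "default/my-pod/app/var/data", "f", "*.txt")
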
// GetListOfFilesFromK8s gets list of files in path from Kubernetes (recursive)
func GetListOfFilesFromK8s(iClient interface{}, path, findType, findName string) ([]string, error) {
client := *iClient.(*K8sClient)
pSplit := strings.Split(path, "/")
if err := validateK8sPath(pSplit); err != nil {
return nil, err
}
namespace, podName, containerName, findPath := initK8sVariables(pSplit)
command := []string{"find", findPath, "-type", findType, "-name", findName}
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
output := new(bytes.Buffer)
stderr, err := Exec(client, namespace, podName, containerName, command, nil, output)
if len(stderr) != 0 {
if attempt == attempts {
return nil, fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return nil, err
}
utils.Sleep(attempt)
continue
}
lines := strings.Split(output.String(), "\n")
var outLines []string
for _, line := range lines {
if line != "" {
outLines = append(outLines, filepath.Base(line))
}
}
return outLines, nil
}
return nil, nil
}
// GetListOfFilesFromK8sV2 gets list of files in path from Kubernetes (recursive)
func GetListOfFilesFromK8sV2(iClient interface{}, path, findType, findName string) (map[string]*K8sFile, error) {
client := *iClient.(*K8sClient)
pSplit := strings.Split(path, "/")
if err := validateK8sPath(pSplit); err != nil {
return nil, err
}
namespace, podName, containerName, findPath := initK8sVariables(pSplit)
command := []string{"find", findPath, "-type", findType, "-name", findName}
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
output := new(bytes.Buffer)
stderr, err := Exec(client, namespace, podName, containerName, command, nil, output)
if len(stderr) != 0 {
if attempt == attempts {
return nil, fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return nil, err
}
utils.Sleep(attempt)
continue
}
lines := strings.Split(output.String(), "\n")
k8sFiles := make(map[string]*K8sFile)
for _, line := range lines {
if line != "" {
name := filepath.Base(line)
k8sFiles[name] = &K8sFile{name: filepath.Base(line)}
}
}
return k8sFiles, nil
}
return nil, nil
}
// DownloadFromK8s downloads a single file from Kubernetes
func DownloadFromK8s(iClient interface{}, path string, writer io.Writer) error {
client := *iClient.(*K8sClient)
pSplit := strings.Split(path, "/")
if err := validateK8sPath(pSplit); err != nil {
return err
}
namespace, podName, containerName, pathToCopy := initK8sVariables(pSplit)
command := []string{"cat", pathToCopy}
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
stderr, err := Exec(client, namespace, podName, containerName, command, nil, writer)
if attempt == attempts {
if len(stderr) != 0 {
return fmt.Errorf("STDERR: %s", stderr)
}
if err != nil {
return err
}
}
if err == nil {
return nil
}
utils.Sleep(attempt)
}
return nil
}
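// Illustrative usage sketch: streaming a single in-cluster file into a local file; the paths
// and names are placeholders and error handling is abbreviated.
//
//	out, err := os.Create("/tmp/config.yaml")
//	if err != nil {
//		return err
//	}
//	defer out.Close()
//	err = DownloadFromK8s(client, "default/my-pod/app/etc/config.yaml", out)
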
// UploadToK8s uploads a single file to Kubernetes
func UploadToK8s(iClient interface{}, toPath, fromPath string, reader io.Reader) error {
client := *iClient.(*K8sClient)
pSplit := strings.Split(toPath, "/")
if err := validateK8sPath(pSplit); err != nil {
return err
}
if len(pSplit) == 3 {
_, fileName := filepath.Split(fromPath)
pSplit = append(pSplit, fileName)
}
namespace, podName, containerName, pathToCopy := initK8sVariables(pSplit)
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
dir, _ := filepath.Split(pathToCopy)
command := []string{"mkdir", "-p", dir}
stderr, err := Exec(client, namespace, podName, containerName, command, nil, nil)
if len(stderr) != 0 {
if attempt == attempts {
return fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return err
}
utils.Sleep(attempt)
continue
}
command = []string{"touch", pathToCopy}
stderr, err = Exec(client, namespace, podName, containerName, command, nil, nil)
if len(stderr) != 0 {
if attempt == attempts {
return fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return err
}
utils.Sleep(attempt)
continue
}
command = []string{"cp", "/dev/stdin", pathToCopy}
stderr, err = Exec(client, namespace, podName, containerName, command, readerWrapper{reader}, nil)
if len(stderr) != 0 {
if attempt == attempts {
return fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return err
}
utils.Sleep(attempt)
continue
}
return nil
}
return nil
}
type readerWrapper struct {
reader io.Reader
}
func (r readerWrapper) Read(p []byte) (int, error) {
return r.reader.Read(p)
}
// Exec executes a command in a given container
func Exec(client K8sClient, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer) ([]byte, error) {
clientset, config := client.ClientSet, client.Config
req := clientset.Core().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec")
scheme := runtime.NewScheme()
if err := core_v1.AddToScheme(scheme); err != nil {
return nil, fmt.Errorf("error adding to scheme: %v", err)
}
parameterCodec := runtime.NewParameterCodec(scheme)
req.VersionedParams(&core_v1.PodExecOptions{
Command: command,
Container: containerName,
Stdin: stdin != nil,
Stdout: stdout != nil,
Stderr: true,
TTY: false,
}, parameterCodec)
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return nil, fmt.Errorf("error while creating Executor: %v", err)
}
var stderr bytes.Buffer
err = exec.Stream(remotecommand.StreamOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: &stderr,
Tty: false,
})
if err != nil {
return nil, fmt.Errorf("error in Stream: %v", err)
}
return stderr.Bytes(), nil
}
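// Illustrative usage sketch: running a one-off command and capturing stdout. Exec takes a
// K8sClient value, so a *K8sClient from GetClientToK8s is dereferenced; the namespace, pod
// and container names are placeholders.
//
//	var out bytes.Buffer
//	stderr, err := Exec(*client, "default", "my-pod", "app", []string{"ls", "/data"}, nil, &out)
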
func validateK8sPath(pathSplit []string) error {
if len(pathSplit) >= 3 {
return nil
}
return fmt.Errorf("illegal path: %s", filepath.Join(pathSplit...))
}
func initK8sVariables(split []string) (string, string, string, string) {
namespace := split[0]
pod := split[1]
container := split[2]
path := getAbsPath(split[3:]...)
return namespace, pod, container, path
}
func getAbsPath(path ...string) string {
return filepath.Join("/", filepath.Join(path...))
}
// DeleteFromK8s deletes a single file from Kubernetes
func DeleteFromK8s(iClient interface{}, path string) error {
client := *iClient.(*K8sClient)
pSplit := strings.Split(path, "/")
if err := validateK8sPath(pSplit); err != nil {
return err
}
namespace, podName, containerName, pathToDelete := initK8sVariables(pSplit)
command := []string{"rm", pathToDelete}
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
stderr, err := Exec(client, namespace, podName, containerName, command, nil, nil)
if attempt == attempts {
if len(stderr) != 0 {
return fmt.Errorf("STDERR: %s", stderr)
}
if err != nil {
return err
}
}
if err == nil {
return nil
}
utils.Sleep(attempt)
}
return nil
}
func SetFileETag(iClient interface{}, path string, files map[string]*K8sFile) error {
client := *iClient.(*K8sClient)
pSplit := strings.Split(path, "/")
if err := validateK8sPath(pSplit); err != nil {
return err
}
namespace, podName, containerName, dirPath := initK8sVariables(pSplit)
for _, file := range files {
fileAbsolutePath := dirPath + "/" + file.name
command := []string{"md5sum", fileAbsolutePath}
attempts := 3
attempt := 0
for attempt < attempts {
attempt++
output := new(bytes.Buffer)
stderr, err := Exec(client, namespace, podName, containerName, command, nil, output)
if len(stderr) != 0 {
if attempt == attempts {
return fmt.Errorf("STDERR: %s", stderr)
}
utils.Sleep(attempt)
continue
}
if err != nil {
if attempt == attempts {
return err
}
utils.Sleep(attempt)
continue
}
md5 := strings.Fields(output.String())[0]
file.eTag = md5
}
}
return nil
}
| ["\"KUBECONFIG\"", "\"HOME\""] | [] | ["HOME", "KUBECONFIG"] | [] | ["HOME", "KUBECONFIG"] | go | 2 | 0 | |
api/datastore/internal/datastoretest/test.go
|
package datastoretest
import (
"bytes"
"context"
"log"
"testing"
"github.com/iron-io/functions/api/models"
"net/http"
"net/url"
"os"
"reflect"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
func setLogBuffer() *bytes.Buffer {
var buf bytes.Buffer
buf.WriteByte('\n')
logrus.SetOutput(&buf)
gin.DefaultErrorWriter = &buf
gin.DefaultWriter = &buf
log.SetOutput(&buf)
return &buf
}
func GetContainerHostIP() string {
dockerHost := os.Getenv("DOCKER_HOST")
if dockerHost == "" {
return "127.0.0.1"
}
parts, _ := url.Parse(dockerHost)
return parts.Hostname()
}
func Test(t *testing.T, ds models.Datastore) {
buf := setLogBuffer()
ctx := context.Background()
t.Run("apps", func(t *testing.T) {
// Testing insert app
_, err := ds.InsertApp(ctx, nil)
if err != models.ErrDatastoreEmptyApp {
t.Log(buf.String())
t.Fatalf("Test InsertApp(nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyApp, err)
}
_, err = ds.InsertApp(ctx, &models.App{})
if err != models.ErrDatastoreEmptyAppName {
t.Log(buf.String())
t.Fatalf("Test InsertApp(&{}): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err)
}
inserted, err := ds.InsertApp(ctx, testApp)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test InsertApp: error when storing new app: %s", err)
}
if !reflect.DeepEqual(*inserted, *testApp) {
t.Log(buf.String())
t.Fatalf("Test InsertApp: expected to insert:\n%v\nbut got:\n%v", testApp, inserted)
}
_, err = ds.InsertApp(ctx, testApp)
if err != models.ErrAppsAlreadyExists {
t.Log(buf.String())
t.Fatalf("Test InsertApp duplicated: expected error `%v`, but it was `%v`", models.ErrAppsAlreadyExists, err)
}
{
// Set a config var
updated, err := ds.UpdateApp(ctx,
&models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1"}})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: error when updating app: %v", err)
}
expected := &models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1"}}
if !reflect.DeepEqual(*updated, *expected) {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated)
}
// Set a different var (without clearing the existing)
updated, err = ds.UpdateApp(ctx,
&models.App{Name: testApp.Name, Config: map[string]string{"OTHER": "TEST"}})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: error when updating app: %v", err)
}
expected = &models.App{Name: testApp.Name, Config: map[string]string{"TEST": "1", "OTHER": "TEST"}}
if !reflect.DeepEqual(*updated, *expected) {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated)
}
// Delete a var
updated, err = ds.UpdateApp(ctx,
&models.App{Name: testApp.Name, Config: map[string]string{"TEST": ""}})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: error when updating app: %v", err)
}
expected = &models.App{Name: testApp.Name, Config: map[string]string{"OTHER": "TEST"}}
if !reflect.DeepEqual(*updated, *expected) {
t.Log(buf.String())
t.Fatalf("Test UpdateApp: expected updated `%v` but got `%v`", expected, updated)
}
}
// Testing get app
_, err = ds.GetApp(ctx, "")
if err != models.ErrDatastoreEmptyAppName {
t.Log(buf.String())
t.Fatalf("Test GetApp: expected error to be %v, but it was %s", models.ErrDatastoreEmptyAppName, err)
}
app, err := ds.GetApp(ctx, testApp.Name)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetApp: error: %s", err)
}
if app.Name != testApp.Name {
t.Log(buf.String())
t.Fatalf("Test GetApp: expected `app.Name` to be `%s` but it was `%s`", app.Name, testApp.Name)
}
// Testing list apps
apps, err := ds.GetApps(ctx, &models.AppFilter{})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetApps: unexpected error %v", err)
}
if len(apps) == 0 {
t.Fatal("Test GetApps: expected result count to be greater than 0")
}
if apps[0].Name != testApp.Name {
t.Log(buf.String())
t.Fatalf("Test GetApps: expected `app.Name` to be `%s` but it was `%s`", app.Name, testApp.Name)
}
apps, err = ds.GetApps(ctx, &models.AppFilter{Name: "Tes%"})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetApps(filter): unexpected error %v", err)
}
if len(apps) == 0 {
t.Fatal("Test GetApps(filter): expected result count to be greater than 0")
}
// Testing app delete
err = ds.RemoveApp(ctx, "")
if err != models.ErrDatastoreEmptyAppName {
t.Log(buf.String())
t.Fatalf("Test RemoveApp: expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err)
}
err = ds.RemoveApp(ctx, testApp.Name)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test RemoveApp: error: %s", err)
}
app, err = ds.GetApp(ctx, testApp.Name)
if err != models.ErrAppsNotFound {
t.Log(buf.String())
t.Fatalf("Test GetApp(removed): expected error `%v`, but it was `%v`", models.ErrAppsNotFound, err)
}
if app != nil {
t.Log(buf.String())
t.Fatal("Test RemoveApp: failed to remove the app")
}
// Test update inexistent app
_, err = ds.UpdateApp(ctx, &models.App{
Name: testApp.Name,
Config: map[string]string{
"TEST": "1",
},
})
if err != models.ErrAppsNotFound {
t.Log(buf.String())
t.Fatalf("Test UpdateApp(inexistent): expected error `%v`, but it was `%v`", models.ErrAppsNotFound, err)
}
})
t.Run("routes", func(t *testing.T) {
// Insert app again to test routes
_, err := ds.InsertApp(ctx, testApp)
if err != nil && err != models.ErrAppsAlreadyExists {
t.Log(buf.String())
t.Fatalf("Test InsertRoute Prep: failed to insert app: ", err)
}
// Testing insert route
{
_, err = ds.InsertRoute(ctx, nil)
if err != models.ErrDatastoreEmptyRoute {
t.Log(buf.String())
t.Fatalf("Test InsertRoute(nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoute, err)
}
_, err = ds.InsertRoute(ctx, &models.Route{AppName: "notreal", Path: "/test"})
if err != models.ErrAppsNotFound {
t.Log(buf.String())
t.Fatalf("Test InsertRoute: expected error `%v`, but it was `%v`", models.ErrAppsNotFound, err)
}
_, err = ds.InsertRoute(ctx, testRoute)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test InsertRoute: error when storing new route: %s", err)
}
_, err = ds.InsertRoute(ctx, testRoute)
if err != models.ErrRoutesAlreadyExists {
t.Log(buf.String())
t.Fatalf("Test InsertRoute duplicated: expected error to be `%v`, but it was `%v`", models.ErrRoutesAlreadyExists, err)
}
}
// Testing get
{
_, err = ds.GetRoute(ctx, "a", "")
if err != models.ErrDatastoreEmptyRoutePath {
t.Log(buf.String())
t.Fatalf("Test GetRoute(empty route path): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoutePath, err)
}
_, err = ds.GetRoute(ctx, "", "a")
if err != models.ErrDatastoreEmptyAppName {
t.Log(buf.String())
t.Fatalf("Test GetRoute(empty app name): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err)
}
route, err := ds.GetRoute(ctx, testApp.Name, testRoute.Path)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetRoute: unexpected error %v", err)
}
var expected models.Route = *testRoute
if !reflect.DeepEqual(*route, expected) {
t.Log(buf.String())
t.Fatalf("Test InsertApp: expected to insert:\n%v\nbut got:\n%v", expected, route)
}
}
// Testing update
{
// Update some fields, and add 3 configs and 3 headers.
updated, err := ds.UpdateRoute(ctx, &models.Route{
AppName: testRoute.AppName,
Path: testRoute.Path,
Timeout: 100,
Config: map[string]string{
"FIRST": "1",
"SECOND": "2",
"THIRD": "3",
},
Headers: http.Header{
"First": []string{"test"},
"Second": []string{"test", "test"},
"Third": []string{"test", "test2"},
},
})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test UpdateRoute: unexpected error: %v", err)
}
expected := &models.Route{
// unchanged
AppName: testRoute.AppName,
Path: testRoute.Path,
Image: "iron/hello",
Type: "sync",
Format: "http",
// updated
Timeout: 100,
Config: map[string]string{
"FIRST": "1",
"SECOND": "2",
"THIRD": "3",
},
Headers: http.Header{
"First": []string{"test"},
"Second": []string{"test", "test"},
"Third": []string{"test", "test2"},
},
}
if !reflect.DeepEqual(*updated, *expected) {
t.Log(buf.String())
t.Fatalf("Test UpdateRoute: expected updated `%v` but got `%v`", expected, updated)
}
// Update a config var, remove another. Add one Header, remove another.
updated, err = ds.UpdateRoute(ctx, &models.Route{
AppName: testRoute.AppName,
Path: testRoute.Path,
Config: map[string]string{
"FIRST": "first",
"SECOND": "",
"THIRD": "3",
},
Headers: http.Header{
"First": []string{"test2"},
"Second": nil,
},
})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test UpdateRoute: unexpected error: %v", err)
}
expected = &models.Route{
// unchanged
AppName: testRoute.AppName,
Path: testRoute.Path,
Image: "iron/hello",
Type: "sync",
Format: "http",
Timeout: 100,
// updated
Config: map[string]string{
"FIRST": "first",
"THIRD": "3",
},
Headers: http.Header{
"First": []string{"test", "test2"},
"Third": []string{"test", "test2"},
},
}
if !reflect.DeepEqual(*updated, *expected) {
t.Log(buf.String())
t.Fatalf("Test UpdateRoute: expected updated:\n`%v`\nbut got:\n`%v`", expected, updated)
}
}
// Testing list routes
routes, err := ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutesByApp: unexpected error %v", err)
}
if len(routes) == 0 {
t.Fatal("Test GetRoutesByApp: expected result count to be greater than 0")
}
if routes[0] == nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: expected non-nil route")
} else if routes[0].Path != testRoute.Path {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: expected `app.Name` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path)
}
routes, err = ds.GetRoutesByApp(ctx, testApp.Name, &models.RouteFilter{Image: testRoute.Image})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutesByApp: unexpected error %v", err)
}
if len(routes) == 0 {
t.Fatal("Test GetRoutesByApp: expected result count to be greater than 0")
}
if routes[0] == nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: expected non-nil route")
} else if routes[0].Path != testRoute.Path {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: expected `app.Name` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path)
}
routes, err = ds.GetRoutesByApp(ctx, "notreal", nil)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutesByApp: error: %s", err)
}
if len(routes) != 0 {
t.Fatalf("Test GetRoutesByApp: expected result count to be 0 but got %d", len(routes))
}
// Testing list routes
routes, err = ds.GetRoutes(ctx, &models.RouteFilter{Image: testRoute.Image})
if err != nil {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: error: %s", err)
}
if len(routes) == 0 {
t.Fatal("Test GetRoutes: expected result count to be greater than 0")
}
if routes[0].Path != testRoute.Path {
t.Log(buf.String())
t.Fatalf("Test GetRoutes: expected `app.Name` to be `%s` but it was `%s`", testRoute.Path, routes[0].Path)
}
// Testing route delete
err = ds.RemoveRoute(ctx, "", "")
if err != models.ErrDatastoreEmptyAppName {
t.Log(buf.String())
t.Fatalf("Test RemoveRoute(empty app name): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyAppName, err)
}
err = ds.RemoveRoute(ctx, "a", "")
if err != models.ErrDatastoreEmptyRoutePath {
t.Log(buf.String())
t.Fatalf("Test RemoveRoute(empty route path): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyRoutePath, err)
}
err = ds.RemoveRoute(ctx, testRoute.AppName, testRoute.Path)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test RemoveApp: unexpected error: %v", err)
}
route, err := ds.GetRoute(ctx, testRoute.AppName, testRoute.Path)
if err != nil && err != models.ErrRoutesNotFound {
t.Log(buf.String())
t.Fatalf("Test GetRoute: expected error `%v`, but it was `%v`", models.ErrRoutesNotFound, err)
}
if route != nil {
t.Log(buf.String())
t.Fatalf("Test RemoveApp: failed to remove the route: %v", route)
}
_, err = ds.UpdateRoute(ctx, &models.Route{
AppName: testRoute.AppName,
Path: testRoute.Path,
Image: "test",
})
if err != models.ErrRoutesNotFound {
t.Log(buf.String())
t.Fatalf("Test UpdateRoute inexistent: expected error to be `%v`, but it was `%v`", models.ErrRoutesNotFound, err)
}
})
t.Run("put-get", func(t *testing.T) {
// Testing Put/Get
err := ds.Put(ctx, nil, nil)
if err != models.ErrDatastoreEmptyKey {
t.Log(buf.String())
t.Fatalf("Test Put(nil,nil): expected error `%v`, but it was `%v`", models.ErrDatastoreEmptyKey, err)
}
err = ds.Put(ctx, []byte("test"), []byte("success"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
val, err := ds.Get(ctx, []byte("test"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
if string(val) != "success" {
t.Log(buf.String())
t.Fatalf("Test Get: expected value to be `%v`, but it was `%v`", "success", string(val))
}
err = ds.Put(ctx, []byte("test"), nil)
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
val, err = ds.Get(ctx, []byte("test"))
if err != nil {
t.Log(buf.String())
t.Fatalf("Test Put: unexpected error: %v", err)
}
if string(val) != "" {
t.Log(buf.String())
t.Fatalf("Test Get: expected value to be `%v`, but it was `%v`", "", string(val))
}
})
}
var testApp = &models.App{
Name: "Test",
}
var testRoute = &models.Route{
AppName: testApp.Name,
Path: "/test",
Image: "iron/hello",
Type: "sync",
Format: "http",
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
publisher/src/main/java/com/jbariel/example/wss/publisher/Publisher.java
|
package com.jbariel.example.wss.publisher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetSocketAddress;
import java.time.ZonedDateTime;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.java_websocket.WebSocket;
import org.java_websocket.handshake.ClientHandshake;
import org.java_websocket.server.WebSocketServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Publisher {
private final static Logger log = LoggerFactory.getLogger(Publisher.class);
private static ScheduledExecutorService executor = Executors.newScheduledThreadPool(2);
private static int WSS_PORT = NumberUtils.toInt(System.getenv("WSS_PORT"), 9001);
public static void main(final String[] args) {
log.info("Starting Publisher...");
final WsServer server = new WsServer();
final Thread sThread = new Thread(server::run);
sThread.start();
final ScheduledFuture<?> future = executor.scheduleAtFixedRate(server::tick, 2, 1, TimeUnit.SECONDS);
log.debug("Reading console...");
log.info("\tType 'exit' to quit");
log.info("\tOther typed messages will broadcast");
log.info("What would you like to say?");
try (BufferedReader in = new BufferedReader(new InputStreamReader((System.in)))) {
while (true) {
String msg = StringUtils.trimToNull(in.readLine());
if (null != msg) {
server.broadcast(msg);
if ("exit".equalsIgnoreCase(msg)) {
future.cancel(false);
server.stop(1000);
break;
}
}
}
} catch (IOException | InterruptedException ex) {
log.error(ex.getLocalizedMessage(), ex);
}
System.exit(0);
}
static class WsServer extends WebSocketServer {
private final Logger log = LoggerFactory.getLogger(WsServer.class);
public WsServer() {
super(new InetSocketAddress(Publisher.WSS_PORT));
}
ZonedDateTime currTime = ZonedDateTime.now();
@Override
public void onOpen(WebSocket conn, ClientHandshake handshake) {
log.info(String.format("Connected [%s]", conn));
conn.send(asString(currTime));
}
@Override
public void onClose(WebSocket conn, int code, String reason, boolean remote) {
log.info(String.format("CLOSED CONNECTION [%s] [%d]: %s", conn, code, reason));
}
@Override
public void onMessage(WebSocket conn, String message) {
log.info("[RECIEVED MESSAGE] " + message);
broadcast(message);
}
@Override
public void onError(WebSocket conn, Exception ex) {
log.error(ex.getLocalizedMessage(), ex);
}
@Override
public void onStart() {
log.info("Started WSS on port " + WSS_PORT);
setConnectionLostTimeout(5);
}
@Override
public void broadcast(String text) {
log.info("[BROADCASTING] " + text);
super.broadcast(text);
}
public void tick() {
currTime = ZonedDateTime.now();
broadcast(currTime);
}
public void broadcast(ZonedDateTime time) {
broadcast(asString(time));
}
protected String asString(ZonedDateTime time) {
return time.toString();
}
}
}
|
[
"\"WSS_PORT\""
] |
[] |
[
"WSS_PORT"
] |
[]
|
["WSS_PORT"]
|
java
| 1 | 0 | |
pytest-embedded-idf/pytest_embedded_idf/app.py
|
import json
import logging
import os
import subprocess
import sys
from typing import Any, Dict, List, Optional, Tuple
from pytest_embedded.app import App
class IdfApp(App):
"""
Idf App class
Attributes:
app_path (str): App path
binary_path (str): binary file path
elf_file (str): elf file path
parttool_path (str): partition tool path
sdkconfig (dict[str, str]): dict contains all k-v pairs from the sdkconfig file
flash_files (list[Tuple[int, str, bool]]): list of (offset, file path, encrypted) tuples for the files that need to be flashed
flash_settings (dict[str, Any]): dict of flash settings
partition_table (dict[str, Any]): dict generated by partition tool
"""
FLASH_ARGS_FILENAME = 'flasher_args.json'
def __init__(
self,
app_path: Optional[str] = None,
build_dir: Optional[str] = None,
part_tool: Optional[str] = None,
**kwargs,
):
"""
Args:
app_path: App path
build_dir: Build directory
part_tool: Partition tool path
"""
super().__init__(app_path, build_dir, **kwargs)
self.binary_path = self._get_binary_path(build_dir or 'build')
if not self.binary_path:
logging.warning('Binary path not specified, skipping parsing app...')
return
self.elf_file = self._get_elf_file()
self.parttool_path = self._get_parttool_file(part_tool)
self.flash_files, self.flash_settings = self._parse_flash_args()
self.partition_table = self._parse_partition_table()
self.sdkconfig = self._parse_sdkconfig()
if not self.sdkconfig:
return
self.target = self._get_target_from_sdkconfig()
def _get_binary_path(self, build_dir: str) -> Optional[str]:
if os.path.isdir(build_dir):
return os.path.realpath(build_dir)
logging.debug(f'{build_dir} doesn\'t exist. Treat as relative path...')
path = os.path.join(self.app_path, build_dir)
if os.path.isdir(path):
return path
logging.warning(f'{path} doesn\'t exist.')
return None
def _get_elf_file(self) -> Optional[str]:
for fn in os.listdir(self.binary_path):
if os.path.splitext(fn)[-1] == '.elf':
return os.path.realpath(os.path.join(self.binary_path, fn))
return None
def _parse_sdkconfig(self) -> Optional[Dict[str, Any]]:
sdkconfig_json_path = os.path.join(self.binary_path, 'config', 'sdkconfig.json')
if not os.path.isfile(sdkconfig_json_path):
logging.warning(f'{sdkconfig_json_path} doesn\'t exist. Skipping...')
return None
return json.load(open(sdkconfig_json_path))
def _get_flash_args_file(self) -> Optional[str]:
for fn in os.listdir(self.binary_path):
if fn == self.FLASH_ARGS_FILENAME:
return os.path.realpath(os.path.join(self.binary_path, fn))
return None
def _is_encrypted(self, flash_args: Dict[str, Any], offset: int, file_path: str):
for entry in flash_args.values():
try:
if (entry['offset'], entry['file']) == (offset, file_path):
return entry['encrypted'] == 'true'
except (TypeError, KeyError):
continue
return False
def _parse_flash_args(
self,
) -> Tuple[Optional[List[Tuple[int, str, bool]]], Optional[Dict[str, Any]]]:
"""
Returns:
(flash_files: [(offset, file_path, encrypted), ...], flash_settings: dict[str, str])
"""
flash_args_filepath = self._get_flash_args_file()
if not flash_args_filepath:
return None, None
with open(flash_args_filepath) as fr:
flash_args = json.load(fr)
res = []
for (offset, file_path) in flash_args['flash_files'].items():
encrypted = self._is_encrypted(flash_args, offset, file_path)
res.append((int(offset, 0), os.path.join(self.binary_path, file_path), encrypted))
flash_files = sorted(res)
flash_settings = flash_args['flash_settings']
flash_settings['encrypt'] = any([file[2] for file in res])
return flash_files, flash_settings
def _get_parttool_file(self, parttool: Optional[str]) -> Optional[str]:
parttool_filepath = parttool or os.path.join(
os.getenv('IDF_PATH', ''),
'components',
'partition_table',
'gen_esp32part.py',
)
if os.path.isfile(parttool_filepath):
return os.path.realpath(parttool_filepath)
logging.warning('Partition Tool not found. (Default: $IDF_PATH/components/partition_table/gen_esp32part.py)')
return None
def _parse_partition_table(self) -> Optional[Dict[str, Any]]:
if not (self.parttool_path and self.flash_files):
return None
errors = []
for _, file, _ in self.flash_files:
if 'partition' in os.path.split(file)[1]:
partition_file = os.path.join(self.binary_path, file)
process = subprocess.Popen(
[sys.executable, self.parttool_path, partition_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
raw_data = stdout.decode() if isinstance(stdout, bytes) else stdout
raw_error = stderr.decode() if isinstance(stderr, bytes) else stderr
if 'Traceback' in raw_error:
# Some exception occurred. It is possible that we've tried the wrong binary file.
errors.append((file, raw_error))
continue
break
else:
traceback_msg = '\n'.join([f'{self.parttool_path} {p}:{os.linesep}{msg}' for p, msg in errors])
raise ValueError(f'No partition table found under {self.binary_path}\n' f'{traceback_msg}')
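# The loop below expects gen_esp32part.py to print the partition table as CSV,
# with comment lines starting with '#' and data lines of the form
#   name,type,subtype,offset,size,flags
# for example "nvs,data,nvs,0x9000,24K," (sizes may carry a K or M suffix).
# The example line is illustrative only, not taken from a real build.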
partition_table = {}
for line in raw_data.splitlines():
if line[0] != '#':
try:
_name, _type, _subtype, _offset, _size, _flags = line.split(',')
if _size[-1] == 'K':
_size = int(_size[:-1]) * 1024
elif _size[-1] == 'M':
_size = int(_size[:-1]) * 1024 * 1024
else:
_size = int(_size)
_offset = int(_offset, 0)
except ValueError:
continue
partition_table[_name] = {
'type': _type,
'subtype': _subtype,
'offset': _offset,
'size': _size,
'flags': _flags,
}
return partition_table
def _get_target_from_sdkconfig(self):
return self.sdkconfig.get('IDF_TARGET', 'esp32')
|
[] |
[] |
[
"IDF_PATH"
] |
[]
|
["IDF_PATH"]
|
python
| 1 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/env python
# Copyright (c) 2012-2016 The ACB coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"COPYRIGHT_HOLDERS",
"PACKAGE_NAME",
"XGETTEXT",
"COPYRIGHT_HOLDERS_SUBSTITUTION"
] |
[]
|
["COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION"]
|
python
| 4 | 0 | |
src/productcatalogservice/server.go
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/golang/protobuf/jsonpb"
pb "github.com/GoogleCloudPlatform/microservices-demo/src/productcatalogservice/genproto"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"github.com/lightstep/otel-launcher-go/launcher"
"github.com/sirupsen/logrus"
grpcotel "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/label"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
var (
cat pb.ListProductsResponse
catalogMutex *sync.Mutex
log *logrus.Logger
extraLatency time.Duration
port = "3550"
reloadCatalog bool
meter = otel.Meter("productcatalogservice/metrics")
gpLock = new(sync.RWMutex)
gpValue = new(float64)
gpLabels = new([]label.KeyValue)
getProductsObserver = metric.Must(meter).NewFloat64ValueObserver("catalog.getProducts.time", func(ctx context.Context, result metric.Float64ObserverResult) {
(*gpLock).RLock()
value := *gpValue
labels := *gpLabels
(*gpLock).RUnlock()
result.Observe(value, labels...)
})
)
func init() {
log = logrus.New()
log.Formatter = &logrus.JSONFormatter{
FieldMap: logrus.FieldMap{
logrus.FieldKeyTime: "timestamp",
logrus.FieldKeyLevel: "severity",
logrus.FieldKeyMsg: "message",
},
TimestampFormat: time.RFC3339Nano,
}
log.Out = os.Stdout
catalogMutex = &sync.Mutex{}
err := readCatalogFile(&cat)
if err != nil {
log.Warnf("could not parse product catalog")
}
}
func main() {
otel := initLightstepTracing(log)
defer otel.Shutdown()
flag.Parse()
// set injected latency
if s := os.Getenv("EXTRA_LATENCY"); s != "" {
v, err := time.ParseDuration(s)
if err != nil {
log.Fatalf("failed to parse EXTRA_LATENCY (%s) as time.Duration: %+v", v, err)
}
extraLatency = v
log.Infof("extra latency enabled (duration: %v)", extraLatency)
} else {
extraLatency = time.Duration(0)
}
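// EXTRA_LATENCY accepts any time.ParseDuration string (for example "3s" or
// "750ms" — illustrative values, not defaults); the parsed duration is slept
// before every RPC handled below.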
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGUSR1, syscall.SIGUSR2)
go func() {
for {
sig := <-sigs
log.Printf("Received signal: %s", sig)
if sig == syscall.SIGUSR1 {
reloadCatalog = true
log.Infof("Enable catalog reloading")
} else {
reloadCatalog = false
log.Infof("Disable catalog reloading")
}
}
}()
if os.Getenv("PORT") != "" {
port = os.Getenv("PORT")
}
log.Infof("starting grpc server at :%s", port)
run(port)
select {}
}
func run(port string) string {
l, err := net.Listen("tcp", fmt.Sprintf(":%s", port))
if err != nil {
log.Fatal(err)
}
srv := grpc.NewServer(
grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),
grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),
)
svc := &productCatalog{}
pb.RegisterProductCatalogServiceServer(srv, svc)
healthpb.RegisterHealthServer(srv, svc)
go srv.Serve(l)
return l.Addr().String()
}
func initLightstepTracing(log logrus.FieldLogger) launcher.Launcher {
launcher := launcher.ConfigureOpentelemetry(
launcher.WithLogLevel("debug"),
launcher.WithLogger(log),
)
log.Info("Initialized Lightstep OpenTelemetry launcher")
return launcher
}
type productCatalog struct{}
func readCatalogFile(catalog *pb.ListProductsResponse) error {
catalogMutex.Lock()
defer catalogMutex.Unlock()
catalogJSON, err := ioutil.ReadFile("products.json")
if err != nil {
log.Fatalf("failed to open product catalog json file: %v", err)
return err
}
if err := jsonpb.Unmarshal(bytes.NewReader(catalogJSON), catalog); err != nil {
log.Warnf("failed to parse the catalog JSON: %v", err)
return err
}
log.Info("successfully parsed product catalog json")
return nil
}
func parseCatalog() []*pb.Product {
if reloadCatalog || len(cat.Products) == 0 {
err := readCatalogFile(&cat)
if err != nil {
return []*pb.Product{}
}
}
return cat.Products
}
func (p *productCatalog) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}
func (p *productCatalog) Watch(req *healthpb.HealthCheckRequest, ws healthpb.Health_WatchServer) error {
return status.Errorf(codes.Unimplemented, "health check via Watch not implemented")
}
func (p *productCatalog) ListProducts(context.Context, *pb.Empty) (*pb.ListProductsResponse, error) {
time.Sleep(extraLatency)
return &pb.ListProductsResponse{Products: parseCatalog()}, nil
}
func (p *productCatalog) GetProduct(ctx context.Context, req *pb.GetProductRequest) (*pb.Product, error) {
trace.SpanFromContext(ctx).SetAttributes(label.String("productId", req.Id))
ts := time.Now()
time.Sleep(extraLatency)
var found *pb.Product
for i := 0; i < len(parseCatalog()); i++ {
if req.Id == parseCatalog()[i].Id {
found = parseCatalog()[i]
}
}
if found == nil {
trace.SpanFromContext(ctx).SetAttributes(label.Bool("error", true))
return nil, status.Errorf(codes.NotFound, "no product with ID %s", req.Id)
}
elapsed := time.Since(ts)
(*gpLock).Lock()
*gpValue = elapsed.Seconds()
*gpLabels = []label.KeyValue{label.String("productId", found.Id)}
(*gpLock).Unlock()
return found, nil
}
func (p *productCatalog) SearchProducts(ctx context.Context, req *pb.SearchProductsRequest) (*pb.SearchProductsResponse, error) {
time.Sleep(extraLatency)
// Interpret query as a substring match in name or description.
var ps []*pb.Product
for _, p := range parseCatalog() {
if strings.Contains(strings.ToLower(p.Name), strings.ToLower(req.Query)) ||
strings.Contains(strings.ToLower(p.Description), strings.ToLower(req.Query)) {
ps = append(ps, p)
}
}
return &pb.SearchProductsResponse{Results: ps}, nil
}
|
[
"\"EXTRA_LATENCY\"",
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT",
"EXTRA_LATENCY"
] |
[]
|
["PORT", "EXTRA_LATENCY"]
|
go
| 2 | 0 | |
packages/services/optimism/l2geth/eth/config.go
|
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"math/big"
"os"
"os/user"
"path/filepath"
"runtime"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rollup"
)
// DefaultConfig contains default settings for use on the Ethereum main net.
var DefaultConfig = Config{
SyncMode: downloader.FastSync,
Ethash: ethash.Config{
CacheDir: "ethash",
CachesInMem: 2,
CachesOnDisk: 3,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
},
NetworkId: 1,
LightPeers: 100,
UltraLightFraction: 75,
DatabaseCache: 512,
TrieCleanCache: 256,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
Miner: miner.Config{
GasFloor: 8000000,
GasCeil: 8000000,
GasPrice: big.NewInt(params.GWei),
Recommit: 3 * time.Second,
},
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
Blocks: 20,
Percentile: 60,
},
Rollup: rollup.Config{
StateDumpPath: "https://raw.githubusercontent.com/ethereum-optimism/regenesis/master/master.json",
// The max size of a transaction that is sent over the p2p network is 128kb
// https://github.com/ethereum/go-ethereum/blob/c2d2f4ed8f232bb11663a1b01a2e578aa22f24bd/core/tx_pool.go#L51
// The batch overhead is:
// 4 bytes function selector
// 5 bytes shouldStartAtElement
// 3 bytes totalElementsToAppend
// 3 bytes context header
// 16 bytes for a single batch context
// 3 bytes for tx size
// the rest of the data can be used for the transaction
// Therefore, the max safe tx size to accept via the sequencer is:
// 128000 - (5+3+3+16+4+3) = 127966
// The mempool would need to be bypassed if a transaction any larger was
// accepted. This option applies to the transaction calldata, so there
// is additional overhead that is unaccounted. Round down to 127000 for
// safety.
MaxCallDataSize: 127000,
DataPrice: big.NewInt(100 * params.GWei),
ExecutionPrice: big.NewInt(0),
},
DiffDbCache: 256,
}
func init() {
home := os.Getenv("HOME")
if home == "" {
if user, err := user.Current(); err == nil {
home = user.HomeDir
}
}
if runtime.GOOS == "darwin" {
DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "Library", "Ethash")
} else if runtime.GOOS == "windows" {
localappdata := os.Getenv("LOCALAPPDATA")
if localappdata != "" {
DefaultConfig.Ethash.DatasetDir = filepath.Join(localappdata, "Ethash")
} else {
DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Ethash")
}
} else {
DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
}
}
//go:generate gencodec -type Config -formats toml -out gen_config.go
type Config struct {
// The genesis block, which is inserted if the database is empty.
// If nil, the Ethereum main net block is used.
Genesis *core.Genesis `toml:",omitempty"`
// Protocol options
NetworkId uint64 // Network ID to use for selecting peers to connect to
SyncMode downloader.SyncMode
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
// Whitelist of required block number -> hash values to accept
Whitelist map[uint64]common.Hash `toml:"-"`
// Light client options
LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers
LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers
LightPeers int `toml:",omitempty"` // Maximum number of LES client peers
// Ultra Light client options
UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers
UltraLightFraction int `toml:",omitempty"` // Percentage of trusted servers to accept an announcement
UltraLightOnlyAnnounce bool `toml:",omitempty"` // Whether to only announce headers, or also serve them
// Database options
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
DatabaseCache int
DatabaseFreezer string
DiffDbCache uint64
TrieCleanCache int
TrieDirtyCache int
TrieTimeout time.Duration
// Mining options
Miner miner.Config
// Ethash options
Ethash ethash.Config
// Transaction pool options
TxPool core.TxPoolConfig
// Gas Price Oracle options
GPO gasprice.Config
// Enables tracking of SHA3 preimages in the VM
EnablePreimageRecording bool
// Miscellaneous options
DocRoot string `toml:"-"`
// Type of the EWASM interpreter ("" for default)
EWASMInterpreter string
// Type of the EVM interpreter ("" for default)
EVMInterpreter string
// RPCGasCap is the global gas cap for eth-call variants.
RPCGasCap *big.Int `toml:",omitempty"`
// Checkpoint is a hardcoded checkpoint which can be nil.
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
// CheckpointOracle is the configuration for checkpoint oracle.
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
// Istanbul block override (TODO: remove after the fork)
OverrideIstanbul *big.Int
// MuirGlacier block override (TODO: remove after the fork)
OverrideMuirGlacier *big.Int
// Optimism Rollup Config
Rollup rollup.Config
}
|
[
"\"HOME\"",
"\"LOCALAPPDATA\""
] |
[] |
[
"HOME",
"LOCALAPPDATA"
] |
[]
|
["HOME", "LOCALAPPDATA"]
|
go
| 2 | 0 | |
pkg/teem/teem.go
|
package teem
import (
"fmt"
"os"
"sync"
log "github.com/F5Networks/k8s-bigip-ctlr/pkg/vlogger"
"github.com/f5devcentral/go-bigip/f5teem"
"github.com/google/uuid"
)
// ResourceTypes structure maintains a map of namespaces to resource count
type ResourceTypes struct {
Ingresses map[string]int
Routes map[string]int
Configmaps map[string]int
VirtualServer map[string]int
TransportServer map[string]int
ExternalDNS map[string]int
IngressLink map[string]int
IPAMVS map[string]int
IPAMTS map[string]int
IPAMSvcLB map[string]int
}
// TeemsData structure contains supporting data to be posted to TEEM's server
type TeemsData struct {
sync.Mutex
CisVersion string
SDNType string
Agent string
PoolMemberType string
DateOfCISDeploy string
PlatformInfo string
ResourceType ResourceTypes
AccessEnabled bool // Will be set to false if network rules don't permit
}
const (
TOTAL = "total"
staging = "staging"
production = "production"
)
// PostTeemsData posts telemetry data to the TEEM server. It returns false when the data could not be posted (for example when network rules block access or the configuration is invalid), so callers can decide whether to keep reporting.
func (td *TeemsData) PostTeemsData() bool {
if !td.AccessEnabled {
return false
}
apiEnv := os.Getenv("TEEM_API_ENVIRONMENT")
var apiKey string
if apiEnv != "" {
if apiEnv == staging {
apiKey = os.Getenv("TEEM_API_KEY")
if len(apiKey) == 0 {
log.Error("API key missing to post to staging teem server")
return false
}
} else if apiEnv != production {
log.Error("Invalid TEEM_API_ENVIRONMENT. Unset to use production server")
return false
}
}
// Retry only once upon failure
var retryCount = 1
var accessEnabled = true
assetInfo := f5teem.AssetInfo{
Name: "CIS-Ecosystem",
Version: fmt.Sprintf("CIS/v%v", td.CisVersion),
Id: uuid.New().String(),
}
teemDevice := f5teem.AnonymousClient(assetInfo, apiKey)
types := []map[string]int{td.ResourceType.IngressLink, td.ResourceType.Ingresses, td.ResourceType.Routes,
td.ResourceType.Configmaps, td.ResourceType.VirtualServer, td.ResourceType.TransportServer,
td.ResourceType.ExternalDNS, td.ResourceType.IPAMVS, td.ResourceType.IPAMTS, td.ResourceType.IPAMSvcLB}
var sum int
for _, rscType := range types {
sum = 0
rscType[TOTAL] = 0 // Reset previous iteration sum
for _, count := range rscType {
sum += count
}
rscType[TOTAL] = sum
}
data := map[string]interface{}{
"PlatformInfo": td.PlatformInfo,
"Agent": td.Agent,
"DateOfCISDeploy": td.DateOfCISDeploy,
"Mode": td.PoolMemberType,
"SDNType": td.SDNType,
"IngressCount": td.ResourceType.Ingresses[TOTAL],
"RoutesCount": td.ResourceType.Routes[TOTAL],
"ConfigmapsCount": td.ResourceType.Configmaps[TOTAL],
"VirtualServerCount": td.ResourceType.VirtualServer[TOTAL],
"TransportServerCount": td.ResourceType.TransportServer[TOTAL],
"ExternalDNSCount": td.ResourceType.ExternalDNS[TOTAL],
"IngressLinkCount": td.ResourceType.IngressLink[TOTAL],
"IPAMVirtualServerCount": td.ResourceType.IPAMVS[TOTAL],
"IPAMTransportServerCount": td.ResourceType.IPAMTS[TOTAL],
"IPAMSvcLBCount": td.ResourceType.IPAMSvcLB[TOTAL],
}
for retryCount >= 0 {
err := teemDevice.Report(data, "CIS Telemetry Data", "1")
if err != nil {
log.Errorf("Error reporting telemetry data :%v", err)
retryCount--
if retryCount < 0 {
accessEnabled = false
}
} else {
retryCount = -1
}
}
return accessEnabled
}
|
[
"\"TEEM_API_ENVIRONMENT\"",
"\"TEEM_API_KEY\""
] |
[] |
[
"TEEM_API_KEY",
"TEEM_API_ENVIRONMENT"
] |
[]
|
["TEEM_API_KEY", "TEEM_API_ENVIRONMENT"]
|
go
| 2 | 0 | |
app/bin/srv.py
|
import logging
import os
import re
import requests
import socket
import time
import sys
import geoip2.database
from geopy.geocoders import Nominatim
import jinja2
from gevent.pywsgi import WSGIServer
from gevent.monkey import patch_all
from gevent.subprocess import Popen, PIPE
patch_all()
import dns.resolver
from dns.exception import DNSException
from flask import Flask, request, render_template, send_from_directory
app = Flask(__name__)
MYDIR = os.environ.get('WTTR_MYDIR', os.path.abspath(os.path.dirname( os.path.dirname('__file__') )))
GEOLITE = os.environ.get('WTTR_GEOLITE', os.path.join( MYDIR, "GeoLite2-City.mmdb" ))
WEGO = os.environ.get('WTTR_WEGO', "/home/igor/go/bin/wego")
LISTEN_HOST = os.environ.get('WTTR_LISTEN_HOST', "127.0.0.1")
LISTEN_PORT = int(os.environ.get('WTTR_LISTEN_PORT', "8002"))
CACHEDIR = os.path.join( MYDIR, "cache" )
IP2LCACHE = os.path.join( MYDIR, "cache/ip2l" )
ALIASES = os.path.join( MYDIR, "share/aliases" )
ANSI2HTML = os.path.join( MYDIR, "share/ansi2html.sh" )
HELP_FILE = os.path.join( MYDIR, 'share/help.txt' )
LOG_FILE = os.path.join( MYDIR, 'log/main.log' )
TEMPLATES = os.path.join( MYDIR, 'share/templates' )
STATIC = os.path.join( MYDIR, 'share/static' )
NOT_FOUND_LOCATION = "NOT_FOUND"
DEFAULT_LOCATION = "Oymyakon"
NOT_FOUND_MESSAGE = """
We were unable to find your location,
so we have brought you to Oymyakon,
one of the coldest permanently inhabited locales on the planet.
"""
PLAIN_TEXT_AGENTS = [
"curl",
"httpie",
"lwp-request",
"wget",
"python-requests"
]
if not os.path.exists(os.path.dirname( LOG_FILE )):
os.makedirs( os.path.dirname( LOG_FILE ) )
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
reader = geoip2.database.Reader(GEOLITE)
geolocator = Nominatim()
my_loader = jinja2.ChoiceLoader([
app.jinja_loader,
jinja2.FileSystemLoader(TEMPLATES),
])
app.jinja_loader = my_loader
class Limits:
def __init__( self ):
self.intervals = [ 'min', 'hour', 'day' ]
self.divisor = {
'min': 60,
'hour': 3600,
'day': 86400,
}
self.counter = {
'min': {},
'hour': {},
'day': {},
}
self.limit = {
'min': 10,
'hour': 20,
'day': 100,
}
self.last_update = {
'min': 0,
'hour': 0,
'day': 0,
}
self.clear_counters()
def check_ip( self, ip ):
self.clear_counters()
for interval in self.intervals:
if ip not in self.counter[interval]:
self.counter[interval][ip] = 0
self.counter[interval][ip] += 1
if self.limit[interval] <= self.counter[interval][ip]:
log("Too many queries: %s in %s for %s" % (self.limit[interval], interval, ip) )
raise RuntimeError("Not so fast! Number of queries per %s is limited to %s" % (interval, self.limit[interval]))
print self.counter
def clear_counters( self ):
t = int( time.time() )
for interval in self.intervals:
if t / self.divisor[interval] != self.last_update[interval]:
self.counter[interval] = {}
self.last_update[interval] = t / self.divisor[interval]
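# Module-wide throttle shared by all requests: roughly 10 queries per minute,
# 20 per hour and 100 per day for each client IP; per-interval counters are
# dropped whenever the wall-clock bucket (minute/hour/day) rolls over.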
limits = Limits()
def error( text ):
print text
raise RuntimeError(text)
def log( text ):
print text.encode('utf-8')
logging.info( text.encode('utf-8') )
def is_ip( ip ):
if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', ip) is None:
return False
try:
socket.inet_aton(ip)
return True
except socket.error:
return False
def save_weather_data( location, filename ):
if location == NOT_FOUND_LOCATION:
location_not_found = True
location = DEFAULT_LOCATION
else:
location_not_found = False
p = Popen( [ WEGO, '-location=%s' % location ], stdout=PIPE, stderr=PIPE )
stdout, stderr = p.communicate()
if p.returncode != 0:
error( stdout + stderr )
dirname = os.path.dirname( filename )
if not os.path.exists( dirname ):
os.makedirs( dirname )
if location_not_found:
stdout += NOT_FOUND_MESSAGE
open( filename, 'w' ).write( stdout )
p = Popen( [ "bash", ANSI2HTML, "--palette=solarized", "--bg=dark" ], stdin=PIPE, stdout=PIPE, stderr=PIPE )
stdout, stderr = p.communicate( stdout )
if p.returncode != 0:
error( stdout + stderr )
open( filename+'.html', 'w' ).write( stdout )
def get_filename( location ):
location = location.replace('/', '_')
timestamp = time.strftime( "%Y%m%d%H", time.localtime() )
return "%s/%s/%s" % ( CACHEDIR, location, timestamp )
def get_wetter(location, ip, html=False):
filename = get_filename( location )
if not os.path.exists( filename ):
limits.check_ip( ip )
save_weather_data( location, filename )
if html:
filename += '.html'
return open(filename).read()
def ip2location( ip ):
cached = os.path.join( IP2LCACHE, ip )
if os.path.exists( cached ):
return open( cached, 'r' ).read()
try:
t = requests.get( 'http://api.ip2location.com/?ip=%s&key=demo&package=WS10' % ip ).text
if ';' in t:
location = t.split(';')[3]
if not os.path.exists( IP2LCACHE ):
os.makedirs( IP2LCACHE )
open( cached, 'w' ).write( location )
return location
except:
pass
def get_location( ip_addr ):
response = reader.city( ip_addr )
city = response.city.name
if city is None and response.location:
coord = "%s, %s" % (response.location.latitude, response.location.longitude)
location = geolocator.reverse(coord, language='en')
city = location.raw.get('address', {}).get('city')
if city is None:
print ip_addr
city = ip2location( ip_addr )
return city or NOT_FOUND_LOCATION
def load_aliases( aliases_filename ):
aliases_db = {}
with open( aliases_filename, 'r' ) as f:
for line in f.readlines():
from_, to_ = line.split(':', 1)
aliases_db[ from_.strip().lower() ] = to_.strip()
return aliases_db
location_alias = load_aliases( ALIASES )
def location_canonical_name( location ):
if location.lower() in location_alias:
return location_alias[location.lower()]
return location
def show_help():
return open(HELP_FILE, 'r').read()
@app.route('/files/<path:path>')
def send_static(path):
return send_from_directory(STATIC, path)
@app.route('/favicon.ico')
def send_favicon():
return send_from_directory(STATIC, 'favicon.ico')
@app.route("/")
@app.route("/<string:location>")
def wttr(location = None):
user_agent = request.headers.get('User-Agent').lower()
if any(agent in user_agent for agent in PLAIN_TEXT_AGENTS):
html_output = False
else:
html_output = True
if location == ':help':
help_ = show_help()
if html_output:
return render_template( 'index.html', body=help_ )
else:
return help_
orig_location = location
if request.headers.getlist("X-Forwarded-For"):
ip = request.headers.getlist("X-Forwarded-For")[0]
if ip.startswith('::ffff:'):
ip = ip[7:]
else:
ip = request.remote_addr
try:
if location is None:
location = get_location( ip )
if is_ip( location ):
location = get_location( location )
if location.startswith('@'):
try:
loc = dns.resolver.query( location[1:], 'LOC' )
location = str("%.7f,%.7f" % (loc[0].float_latitude, loc[0].float_longitude))
except DNSException, e:
location = get_location( socket.gethostbyname( location[1:] ) )
location = location_canonical_name( location )
log("%s %s %s %s" % (ip, user_agent, orig_location, location))
return get_wetter( location, ip, html=html_output )
except Exception, e:
logging.error("Exception has occurred", exc_info=1)
return str(e).rstrip()+"\n"
server = WSGIServer((LISTEN_HOST, LISTEN_PORT), app)
server.serve_forever()
|
[] |
[] |
[
"WTTR_LISTEN_HOST",
"WTTR_GEOLITE",
"WTTR_WEGO",
"WTTR_LISTEN_PORT",
"WTTR_MYDIR"
] |
[]
|
["WTTR_LISTEN_HOST", "WTTR_GEOLITE", "WTTR_WEGO", "WTTR_LISTEN_PORT", "WTTR_MYDIR"]
|
python
| 5 | 0 | |
cmd/get-code/main.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Exchanges a verification code for a verification token.
package main
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"time"
"github.com/google/exposure-notifications-verification-server/pkg/clients"
"github.com/google/exposure-notifications-server/pkg/logging"
"github.com/sethvargo/go-signalcontext"
)
var (
testFlag = flag.String("type", "", "diagnosis test type: confirmed, likely, negative")
onsetFlag = flag.String("onset", "", "Symptom onset date, YYYY-MM-DD format")
apikeyFlag = flag.String("apikey", "", "API Key to use")
addrFlag = flag.String("addr", "http://localhost:8080", "protocol, address and port on which to make the API call")
timeoutFlag = flag.Duration("timeout", 5*time.Second, "request time out duration in the format: 0h0m0s")
)
func main() {
flag.Parse()
ctx, done := signalcontext.OnInterrupt()
debug, _ := strconv.ParseBool(os.Getenv("LOG_DEBUG"))
logger := logging.NewLogger(debug)
ctx = logging.WithLogger(ctx, logger)
err := realMain(ctx)
done()
if err != nil {
logger.Fatal(err)
}
}
func realMain(ctx context.Context) error {
logger := logging.FromContext(ctx)
request, response, err := clients.IssueCode(ctx, *addrFlag, *apikeyFlag, *testFlag, *onsetFlag, *timeoutFlag)
logger.Infow("sent request", "request", request)
if err != nil {
return fmt.Errorf("failed to get token: %w", err)
}
logger.Infow("got response", "response", response)
return nil
}
|
[
"\"LOG_DEBUG\""
] |
[] |
[
"LOG_DEBUG"
] |
[]
|
["LOG_DEBUG"]
|
go
| 1 | 0 | |
request/alipay_pass_instance_update.go
|
package request
const AlipayPassInstanceUpdateMethod = "alipay.pass.instance.update"
type AlipayPassInstanceUpdateRequest struct {
SerialNumber string `json:"serial_number"`
ChannelId string `json:"channel_id"`
TplParams string `json:"tpl_params,omitempty"`
Status string `json:"status,omitempty"`
VerifyCode string `json:"verify_code,omitempty"`
VerifyType string `json:"verify_type,omitempty"`
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tests/test_mnist_simple.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tests.utils import CustomTestCase
class Simple_MNIST_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
# define placeholders
cls.x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
cls.y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
# define the network
network = tl.layers.InputLayer(cls.x, name='input')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu1')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop2')
network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu2')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop3')
# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
# speed up computation, so we use identity here.
# see tf.nn.sparse_softmax_cross_entropy_with_logits()
cls.network = tl.layers.DenseLayer(network, n_units=10, name='output')
# define cost function and metric.
y = cls.network.outputs
cls.cost = tl.cost.cross_entropy(y, cls.y_, name='cost')
correct_prediction = tf.equal(tf.argmax(y, 1), cls.y_)
cls.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# y_op = tf.argmax(tf.nn.softmax(y), 1)
# define the optimizer
train_params = cls.network.all_params
cls.train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cls.cost, var_list=train_params)
@classmethod
def tearDownClass(cls):
tf.reset_default_graph()
def test_reuse_vgg(self):
# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# for fashion_MNIST dataset test
# X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 784))
with self.assertNotRaises(Exception):
with tf.Session() as sess:
# initialize all variables in the session
tl.layers.initialize_global_variables(sess)
# print network information
self.network.print_params()
self.network.print_layers()
# train the network
tl.utils.fit(
sess, self.network, self.train_op, self.cost, X_train, y_train, self.x, self.y_, acc=self.acc,
batch_size=500, n_epoch=1, print_freq=1, X_val=X_val, y_val=y_val, eval_train=False
)
# evaluation
tl.utils.test(
sess, self.network, self.acc, X_test, y_test, self.x, self.y_, batch_size=None, cost=self.cost
)
# save the network to .npz file
tl.files.save_npz(self.network.all_params, name='model.npz')
sess.close()
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
unittest.main()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
bootstrap/discovery/discovery.go
|
package discovery
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"runtime"
"time"
"github.com/drycc/drycc/pkg/version"
"golang.org/x/crypto/ssh"
)
type Info struct {
ClusterURL string
InstanceURL string
Name string
}
type Instance struct {
ID string `json:"id,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
DryccVersion string `json:"drycc_version,omitempty"`
SSHPublicKeys []SSHPublicKey `json:"ssh_public_keys,omitempty"`
URL string `json:"url,omitempty"`
Name string `json:"name,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
}
type SSHPublicKey struct {
Type string `json:"type"`
Data []byte `json:"data"`
}
func RegisterInstance(info Info) (string, error) {
data := struct {
Data Instance `json:"data"`
}{Instance{
Name: info.Name,
URL: info.InstanceURL,
SSHPublicKeys: make([]SSHPublicKey, 0, 4),
DryccVersion: version.String(),
}}
for _, t := range []string{"dsa", "rsa", "ecdsa", "ed25519"} {
keyData, err := ioutil.ReadFile(fmt.Sprintf("/etc/ssh/ssh_host_%s_key.pub", t))
if err != nil {
// TODO(titanous): log this?
continue
}
k, _, _, _, err := ssh.ParseAuthorizedKey(keyData)
if err != nil {
// TODO(titanous): log this?
continue
}
data.Data.SSHPublicKeys = append(data.Data.SSHPublicKeys, SSHPublicKey{Type: t, Data: k.Marshal()})
}
jsonData, err := json.Marshal(&data)
if err != nil {
return "", err
}
// TODO(titanous): retry
uri := info.ClusterURL + "/instances"
res, err := http.Post(uri, "application/json", bytes.NewReader(jsonData))
if err != nil {
return "", err
}
if res.StatusCode != http.StatusCreated && res.StatusCode != http.StatusConflict {
return "", urlError("POST", uri, res.StatusCode)
}
if err := json.NewDecoder(res.Body).Decode(&data); err != nil {
return "", err
}
return data.Data.ID, nil
}
func GetCluster(uri string) ([]*Instance, error) {
uri += "/instances"
res, err := http.Get(uri)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, urlError("GET", uri, res.StatusCode)
}
defer res.Body.Close()
var data struct {
Data []*Instance `json:"data"`
}
err = json.NewDecoder(res.Body).Decode(&data)
return data.Data, err
}
func NewToken() (string, error) {
uri := "https://discovery.drycc.cc/clusters"
if base := os.Getenv("DISCOVERY_SERVER"); base != "" {
uri = base + "/clusters"
}
req, err := http.NewRequest("POST", uri, nil)
if err != nil {
return "", err
}
req.Header.Set("User-Agent", fmt.Sprintf("drycc-host/%s %s-%s", version.String(), runtime.GOOS, runtime.GOARCH))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
if res.StatusCode != http.StatusCreated {
return "", urlError("POST", uri, res.StatusCode)
}
base, err := url.Parse(uri)
if err != nil {
return "", err
}
cluster, err := url.Parse(res.Header.Get("Location"))
if err != nil {
return "", err
}
return base.ResolveReference(cluster).String(), nil
}
func urlError(method, uri string, status int) error {
return &url.Error{
Op: method,
URL: uri,
Err: fmt.Errorf("unexpected status %d", status),
}
}
|
[
"\"DISCOVERY_SERVER\""
] |
[] |
[
"DISCOVERY_SERVER"
] |
[]
|
["DISCOVERY_SERVER"]
|
go
| 1 | 0 | |
core/router/node/node.go
|
package node
import (
"sort"
"strings"
"github.com/kataras/iris/context"
"github.com/kataras/iris/core/errors"
)
// Nodes a conversion type for []*node.
type Nodes []*node
type node struct {
s string
routeName string
wildcardParamName string // name of the wildcard parameter, only one per whole Node is allowed
paramNames []string // only-names
childrenNodes Nodes
handlers context.Handlers
root bool
rootWildcard bool // if it's a wildcard {path} type on root, it should allow everything but it is not conflicts with
// any other static or dynamic or wildcard paths if exists on other nodes.
}
// ErrDublicate is returned from `Add` when two or more routes have the same registered path.
var ErrDublicate = errors.New("two or more routes have the same registered path")
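// Note, inferred from the parsing below rather than stated upstream: paths handed
// to Add are expected to mark dynamic segments with a leading ':' and a trailing
// wildcard as '*name' (e.g. "/users/:id" or "/files/*path" — illustrative only);
// Find later maps the captured values back onto those parameter names.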
/// TODO: clean up needed until v8.5
// Add adds a node to the tree, returns an ErrDublicate error on failure.
func (nodes *Nodes) Add(routeName string, path string, handlers context.Handlers) error {
// println("[Add] adding path: " + path)
// resolve params and if that node should be added as root
var params []string
var paramStart, paramEnd int
for {
paramStart = strings.IndexByte(path[paramEnd:], ':')
if paramStart == -1 {
break
}
paramStart += paramEnd
paramStart++
paramEnd = strings.IndexByte(path[paramStart:], '/')
if paramEnd == -1 {
params = append(params, path[paramStart:])
path = path[:paramStart]
break
}
paramEnd += paramStart
params = append(params, path[paramStart:paramEnd])
path = path[:paramStart] + path[paramEnd:]
paramEnd -= paramEnd - paramStart
}
var p []int
for i := 0; i < len(path); i++ {
idx := strings.IndexByte(path[i:], ':')
if idx == -1 {
break
}
p = append(p, idx+i)
i = idx + i
}
for _, idx := range p {
// print("-2 nodes.Add: path: " + path + " params len: ")
// println(len(params))
if err := nodes.add(routeName, path[:idx], nil, nil, true); err != nil {
return err
}
// print("-1 nodes.Add: path: " + path + " params len: ")
// println(len(params))
if nidx := idx + 1; len(path) > nidx {
if err := nodes.add(routeName, path[:nidx], nil, nil, true); err != nil {
return err
}
}
}
// print("nodes.Add: path: " + path + " params len: ")
// println(len(params))
if err := nodes.add(routeName, path, params, handlers, true); err != nil {
return err
}
// prioritize by static path remember, they were already sorted by subdomains too.
nodes.prioritize()
return nil
}
func (nodes *Nodes) add(routeName, path string, paramNames []string, handlers context.Handlers, root bool) (err error) {
// println("[add] route name: " + routeName)
// println("[add] adding path: " + path)
// nice, so this is how it works:
// make the node keep its own wildcard parameter name
// and at the end pick it up, based on the *paramname,
// but in here the registration happens based on the last /
// set the wildcard param name to the root and its children.
wildcardIdx := strings.IndexByte(path, '*')
wildcardParamName := ""
if wildcardIdx > 0 && len(paramNames) == 0 { // 27 Oct comment: && len(paramNames) == 0 {
wildcardParamName = path[wildcardIdx+1:]
path = path[0:wildcardIdx-1] + "/" // replace *paramName with single slash
// if path[len(path)-1] == '/' {
// if root wildcard, then add it as it's and return
rootWildcard := path == "/"
if rootWildcard {
path += "/" // if root wildcard, then do it like "//" instead of simple "/"
}
n := &node{
rootWildcard: rootWildcard,
s: path,
routeName: routeName,
wildcardParamName: wildcardParamName,
paramNames: paramNames,
handlers: handlers,
root: root,
}
*nodes = append(*nodes, n)
// println("1. nodes.Add path: " + path)
return
}
loop:
for _, n := range *nodes {
if n.rootWildcard {
continue
}
if len(n.paramNames) == 0 && n.wildcardParamName != "" {
continue
}
minlen := len(n.s)
if len(path) < minlen {
minlen = len(path)
}
for i := 0; i < minlen; i++ {
if n.s[i] == path[i] {
continue
}
if i == 0 {
continue loop
}
*n = node{
s: n.s[:i],
childrenNodes: Nodes{
{
s: n.s[i:],
routeName: n.routeName,
wildcardParamName: n.wildcardParamName, // wildcardParamName
paramNames: n.paramNames,
childrenNodes: n.childrenNodes,
handlers: n.handlers,
},
{
s: path[i:],
routeName: routeName,
wildcardParamName: wildcardParamName,
paramNames: paramNames,
handlers: handlers,
},
},
root: n.root,
}
// println("2. change n and return " + n.s[:i] + " and " + path[i:])
return
}
if len(path) < len(n.s) {
// println("3. change n and return | n.s[:len(path)] = " + n.s[:len(path)-1] + " and child: " + n.s[len(path)-1:])
*n = node{
s: n.s[:len(path)],
routeName: routeName,
wildcardParamName: wildcardParamName,
paramNames: paramNames,
childrenNodes: Nodes{
{
s: n.s[len(path):],
routeName: n.routeName,
wildcardParamName: n.wildcardParamName, // wildcardParamName
paramNames: n.paramNames,
childrenNodes: n.childrenNodes,
handlers: n.handlers,
},
},
handlers: handlers,
root: n.root,
}
return
}
if len(path) > len(n.s) {
if n.wildcardParamName != "" {
n := &node{
s: path,
routeName: routeName,
wildcardParamName: wildcardParamName,
paramNames: paramNames,
handlers: handlers,
root: root,
}
// println("3.5. nodes.Add path: " + n.s)
*nodes = append(*nodes, n)
return
}
pathToAdd := path[len(n.s):]
// println("4. nodes.Add route name: " + routeName)
// println("4. nodes.Add path: " + pathToAdd)
err = n.childrenNodes.add(routeName, pathToAdd, paramNames, handlers, false)
return err
}
if len(handlers) == 0 { // missing handlers
return nil
}
if len(n.handlers) > 0 { // n.handlers already setted
return ErrDublicate
}
n.paramNames = paramNames
n.handlers = handlers
n.routeName = routeName
return
}
// START
// Author's note:
// 27 Oct 2017; fixes s|i|l+static+p
// without breaking the current tests.
if wildcardIdx > 0 {
wildcardParamName = path[wildcardIdx+1:]
path = path[0:wildcardIdx-1] + "/"
}
// END
n := &node{
s: path,
routeName: routeName,
wildcardParamName: wildcardParamName,
paramNames: paramNames,
handlers: handlers,
root: root,
}
*nodes = append(*nodes, n)
// println("5. node add on path: " + path + " n.s: " + n.s + " wildcard param: " + n.wildcardParamName)
return
}
// Find resolves the path, fills its params
// and returns the registered to the resolved node's handlers.
func (nodes Nodes) Find(path string, params *context.RequestParams) (string, context.Handlers) {
n, paramValues := nodes.findChild(path, nil)
if n != nil {
// map the params,
// n.params are the param names
if len(paramValues) > 0 {
// println("-----------")
// print("param values returned len: ")
// println(len(paramValues))
// println("first value is: " + paramValues[0])
// print("n.paramNames len: ")
// println(len(n.paramNames))
for i, name := range n.paramNames {
// println("setting param name: " + name + " = " + paramValues[i])
params.Set(name, paramValues[i])
}
// last is the wildcard,
// if there are more paramValues than registered param names.
// Note that n.wildcardParamName may be non-empty even when the matched
// path doesn't contain a wildcard, so the check is required.
if len(paramValues) > len(n.paramNames) {
// println("len(paramValues) > len(n.paramNames)")
lastWildcardVal := paramValues[len(paramValues)-1]
// println("setting wildcard param name: " + n.wildcardParamName + " = " + lastWildcardVal)
params.Set(n.wildcardParamName, lastWildcardVal)
}
}
return n.routeName, n.handlers
}
return "", nil
}
// Exists returns true if a node with that "path" exists,
// otherwise false.
//
// We don't care about parameters here.
func (nodes Nodes) Exists(path string) bool {
n, _ := nodes.findChild(path, nil)
return n != nil && len(n.handlers) > 0
}
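// findChild walks the nodes recursively: ":" segments capture a path parameter,
// rootWildcard and wildcard nodes capture the remaining path, and static
// segments are matched by prefix. It returns the matched node together with the
// parameter values collected along the way, or nil when nothing matches.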
func (nodes Nodes) findChild(path string, params []string) (*node, []string) {
for _, n := range nodes {
if n.s == ":" {
paramEnd := strings.IndexByte(path, '/')
if paramEnd == -1 {
if len(n.handlers) == 0 {
return nil, nil
}
return n, append(params, path)
}
return n.childrenNodes.findChild(path[paramEnd:], append(params, path[:paramEnd]))
}
// println("n.s: " + n.s)
// print("n.childrenNodes len: ")
// println(len(n.childrenNodes))
// print("n.root: ")
// println(n.root)
// this could be checked at runtime with:
// if n.s == "//" && n.root && n.wildcardParamName != "" {
// but that would slow things down, so we keep a static field on the node itself:
if n.rootWildcard {
// println("return from n.rootWildcard")
// single root wildcard
if len(path) < 2 {
// do not remove this; it seems useless but it's not,
// we hit an error in production and this fixes it.
path = "/" + path
}
return n, append(params, path[1:])
}
// the second condition may be unnecessary
// because of the n.rootWildcard check above, but keep it.
if n.wildcardParamName != "" && len(path) > 2 {
// println("n has wildcard n.s: " + n.s + " on path: " + path)
// n.s = static/, path = static
// println(n.s + " vs path: " + path)
// we could have /other/ as n.s so
// we must do this check, remember:
// now wildcards live on their own nodes
if len(path) == len(n.s)-1 {
// then it's like:
// path = /other2
// ns = /other2/
if path == n.s[0:len(n.s)-1] {
return n, params
}
}
// otherwise path = /other2/dsadas
// ns= /other2/
if strings.HasPrefix(path, n.s) {
if len(path) > len(n.s)+1 {
return n, append(params, path[len(n.s):]) // without slash
}
}
}
if !strings.HasPrefix(path, n.s) {
// fmt.Printf("---here root: %v, n.s: "+n.s+" and path: "+path+" is dynamic: %v , wildcardParamName: %s, children len: %v \n", n.root, n.isDynamic(), n.wildcardParamName, len(n.childrenNodes))
// println(path + " n.s: " + n.s + " continue...")
continue
}
if len(path) == len(n.s) {
if len(n.handlers) == 0 {
return nil, nil
}
return n, params
}
child, childParamNames := n.childrenNodes.findChild(path[len(n.s):], params)
// print("childParamNames len: ")
// println(len(childParamNames))
// if len(childParamNames) > 0 {
// println("childParamsNames[0] = " + childParamNames[0])
// }
if child == nil || len(child.handlers) == 0 {
if n.s[len(n.s)-1] == '/' && !(n.root && (n.s == "/" || len(n.childrenNodes) > 0)) {
if len(n.handlers) == 0 {
return nil, nil
}
// println("if child == nil.... | n.s = " + n.s)
// print("n.paramNames len: ")
// println(n.paramNames)
// print("n.wildcardParamName is: ")
// println(n.wildcardParamName)
// print("return n, append(params, path[len(n.s) | params: ")
// println(path[len(n.s):])
return n, append(params, path[len(n.s):])
}
continue
}
return child, childParamNames
}
return nil, nil
}
// childLen returns all the children's and their children's length.
func (n *node) childLen() (i int) {
for _, n := range n.childrenNodes {
i++
i += n.childLen()
}
return
}
func (n *node) isDynamic() bool {
return n.s == ":" || n.wildcardParamName != "" || n.rootWildcard
}
// prioritize sets the static paths first.
func (nodes Nodes) prioritize() {
sort.Slice(nodes, func(i, j int) bool {
if nodes[i].isDynamic() {
return false
}
if nodes[j].isDynamic() {
return true
}
return nodes[i].childLen() > nodes[j].childLen()
})
for _, n := range nodes {
n.childrenNodes.prioritize()
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
models/logging.go
|
package models
import (
"go.uber.org/zap"
"os"
"go.uber.org/zap/zapcore"
)
var unsetEnvVar = ""
var logr *zap.Logger
var sugaredLogr *zap.SugaredLogger
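// Logger lazily builds a package-level singleton: a console-encoded Info-level
// logger when running in production (DEBUG unset), otherwise zap's development
// logger. Note that the lazy initialization is not guarded by a mutex, so
// concurrent first calls could build the logger twice.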
func Logger() *zap.Logger {
if logr == nil {
if isProduction() {
encoderConfig := zapcore.EncoderConfig{
// Keys can be anything except the empty string.
TimeKey: "T",
MessageKey: "M",
LineEnding: zapcore.DefaultLineEnding,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.StringDurationEncoder,
}
config := zap.Config{
Level: zap.NewAtomicLevelAt(zap.InfoLevel),
Development: false,
Encoding: "console",
EncoderConfig: encoderConfig,
OutputPaths: []string{"stdout"},
ErrorOutputPaths: []string{"stderr"},
}
logr, _ = config.Build()
} else {
logr, _ = zap.NewDevelopment()
}
}
return logr
}
func isProduction() bool {
return os.Getenv("DEBUG") == unsetEnvVar
}
func SugaredLogger() *zap.SugaredLogger {
if sugaredLogr == nil {
sugaredLogr = Logger().Sugar()
}
return sugaredLogr
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
assocentity_test.go
|
package assocentity
import (
"log"
"os"
"reflect"
"testing"
"github.com/joho/godotenv"
"github.com/ndabAP/assocentity/v8/tokenize"
)
var credentialsFile string
func NewNLP(lang tokenize.Lang) *tokenize.NLP {
nlp, err := tokenize.NewNLP(credentialsFile, lang)
if err != nil {
log.Fatal(err)
}
return nlp
}
func TestAssocIntegrationSingleWordEntities(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
if err := godotenv.Load(); err != nil {
log.Fatal(err)
}
credentialsFile = os.Getenv("GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION")
text := "Punchinello wanted Payne? He'd see the pain."
entities := []string{"Punchinello", "Payne"}
nlp := NewNLP("en")
dps := tokenize.NewPoSDetermer(tokenize.ANY)
got, err := Do(nlp, dps, text, entities)
if err != nil {
log.Fatal(err)
}
want := map[tokenize.Token]float64{
{PoS: tokenize.VERB, Token: "wanted"}: 1,
{PoS: tokenize.PUNCT, Token: "?"}: 2,
{PoS: tokenize.PRON, Token: "He"}: 3,
{PoS: tokenize.VERB, Token: "'d"}: 4,
{PoS: tokenize.VERB, Token: "see"}: 5,
{PoS: tokenize.DET, Token: "the"}: 6,
{PoS: tokenize.NOUN, Token: "pain"}: 7,
{PoS: tokenize.PUNCT, Token: "."}: 8,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Assoc() = %v, want %v", got, want)
}
}
func TestAssocIntegrationSingleWordEntitiesEnglishLanguage(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
if err := godotenv.Load(); err != nil {
log.Fatal(err)
}
credentialsFile = os.Getenv("GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION")
text := "Punchinello wanted Payne? He'd see the pain."
entities := []string{"Punchinello", "Payne"}
nlp := NewNLP("en")
dps := tokenize.NewPoSDetermer(tokenize.ANY)
got, err := Do(nlp, dps, text, entities)
if err != nil {
log.Fatal(err)
}
want := map[tokenize.Token]float64{
{PoS: tokenize.VERB, Token: "wanted"}: 1,
{PoS: tokenize.PUNCT, Token: "?"}: 2,
{PoS: tokenize.PRON, Token: "He"}: 3,
{PoS: tokenize.VERB, Token: "'d"}: 4,
{PoS: tokenize.VERB, Token: "see"}: 5,
{PoS: tokenize.DET, Token: "the"}: 6,
{PoS: tokenize.NOUN, Token: "pain"}: 7,
{PoS: tokenize.PUNCT, Token: "."}: 8,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Assoc() = %v, want %v", got, want)
}
}
func TestAssocIntegrationMultiWordEntities(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
if err := godotenv.Load(); err != nil {
log.Fatal(err)
}
credentialsFile = os.Getenv("GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION")
text := "Max Payne, this is Deputy Chief Jim Bravura from the NYPD."
entities := []string{"Max Payne", "Jim Bravura"}
nlp := NewNLP("en")
dps := tokenize.NewPoSDetermer(tokenize.ANY)
got, err := Do(nlp, dps, text, entities)
if err != nil {
log.Fatal(err)
}
want := map[tokenize.Token]float64{
{PoS: tokenize.PUNCT, Token: ","}: 3,
{PoS: tokenize.DET, Token: "this"}: 3,
{PoS: tokenize.VERB, Token: "is"}: 3,
{PoS: tokenize.NOUN, Token: "Deputy"}: 3,
{PoS: tokenize.NOUN, Token: "Chief"}: 3,
{PoS: tokenize.ADP, Token: "from"}: 4,
{PoS: tokenize.DET, Token: "the"}: 5,
{PoS: tokenize.NOUN, Token: "NYPD"}: 6,
{PoS: tokenize.PUNCT, Token: "."}: 7,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Assoc() = %v, want %v", got, want)
}
}
func TestAssocIntegrationDefinedPartOfSpeech(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
if err := godotenv.Load(); err != nil {
log.Fatal(err)
}
credentialsFile = os.Getenv("GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION")
text := `"The things that I want", by Max Payne.`
entities := []string{"Max Payne"}
nlp := NewNLP("en")
dps := tokenize.NewPoSDetermer(tokenize.DET | tokenize.VERB | tokenize.PUNCT)
got, err := Do(nlp, dps, text, entities)
if err != nil {
log.Fatal(err)
}
want := map[tokenize.Token]float64{
{PoS: tokenize.PUNCT, Token: `"`}: 4,
{PoS: tokenize.DET, Token: "The"}: 5,
{PoS: tokenize.DET, Token: "that"}: 4,
{PoS: tokenize.VERB, Token: "want"}: 3,
{PoS: tokenize.PUNCT, Token: ","}: 1,
{PoS: tokenize.PUNCT, Token: "."}: 1,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Assoc() = %v, want %v", got, want)
}
}
// Create a custom NLP instance
type nlpTest struct{}
// Second iteration is always for entities
var iterations int
func (n *nlpTest) Tokenize(text string) ([]tokenize.Token, error) {
if iterations == 0 {
iterations++
return []tokenize.Token{
{
Token: "Punchinello",
PoS: tokenize.NOUN,
},
{
Token: "was",
PoS: tokenize.VERB,
},
{
Token: "burning",
PoS: tokenize.VERB,
},
{
Token: "to",
PoS: tokenize.PRT,
},
{
Token: "get",
PoS: tokenize.VERB,
},
{
Token: "me",
PoS: tokenize.PRON,
},
}, nil
}
return []tokenize.Token{
{
Token: "Punchinello",
PoS: tokenize.NOUN,
},
}, nil
}
func TestAssocIntegrationSingleWordEntitiesShort(t *testing.T) {
text := "Punchinello was burning to get me"
entities := []string{"Punchinello"}
dps := tokenize.NewPoSDetermer(tokenize.ANY)
got, err := Do(&nlpTest{}, dps, text, entities)
if err != nil {
log.Fatal(err)
}
want := map[tokenize.Token]float64{
{PoS: tokenize.VERB, Token: "was"}: 1,
{PoS: tokenize.VERB, Token: "burning"}: 2,
{PoS: tokenize.PRT, Token: "to"}: 3,
{PoS: tokenize.VERB, Token: "get"}: 4,
{PoS: tokenize.PRON, Token: "me"}: 5,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Assoc() = %v, want %v", got, want)
}
}
func BenchmarkAssoc(b *testing.B) {
text := "Punchinello was burning to get me"
entities := []string{"Punchinello"}
dps := tokenize.NewPoSDetermer(tokenize.ANY)
for n := 0; n < b.N; n++ {
Do(&nlpTest{}, dps, text, entities)
}
}
|
[
"\"GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION\"",
"\"GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION\"",
"\"GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION\"",
"\"GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION\""
] |
[] |
[
"GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION"
] |
[]
|
["GOOGLE_NLP_SERVICE_ACCOUNT_FILE_LOCATION"]
|
go
| 1 | 0 | |
main.py
|
# Made with python3
# (C) @FayasNoushad
# Copyright permission under MIT License
# All rights reserved by FayasNoushad
# License -> https://github.com/FayasNoushad/Telegraph-Uploader-Bot-V2/blob/main/LICENSE
import os
import time
import math
import json
import string
import random
import traceback
import asyncio
import datetime
import aiofiles
from random import choice
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.errors import FloodWait, InputUserDeactivated, UserIsBlocked, PeerIdInvalid, UserNotParticipant, UserBannedInChannel
from pyrogram.errors.exceptions.bad_request_400 import PeerIdInvalid
from telegraph import upload_file
from database import Database
UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL", "")
BOT_OWNER = int(os.environ["BOT_OWNER"])
DATABASE_URL = os.environ["DATABASE_URL"]
db = Database(DATABASE_URL, "FnTelegraphBot")
Bot = Client(
"Telegraph Uploader Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"],
)
START_TEXT = """**Hello {} 😌
I am small media or file to telegra.ph link uploader bot.**
>> `I can convert under 5MB photo or video to telegraph link.`
Powered by @SLBotsOfficial👑"""
HELP_TEXT = """**Hey, Follow these steps:**
➠ Just give me a media under 5MB
➠ Then I will download it
➠ I will then upload it to the telegra.ph link
**Available Commands**
/start - Checking Bot Online
/help - For more help
/about - For more about me
/status - For bot updates
Powered by @SLBotsOfficial👑"""
ABOUT_TEXT = """--**About Me**-- 😎
🤖 **Name :** [Telegraph Uploader](https://telegram.me/{})
👨💻 **Creator :** [Fayas](https://github.com/TharukRenuja)
📢 **Channel :** [Fayas Noushad](https://telegram.me/SLBotsOfficial)
📝 **Language :** [Python3](https://python.org)
🧰 **Framework :** [Pyrogram](https://pyrogram.org)
📡 **Server :** [Heroku](https://heroku.com)"""
FORCE_SUBSCRIBE_TEXT = "<code>Sorry Dear You Must Join My Updates Channel for using me 😌😉....</code>"
START_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('⚙ Help', callback_data='help'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
HELP_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
ABOUT_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('Help ⚙', callback_data='help'),
InlineKeyboardButton('Close ✖️', callback_data='close')
]]
)
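# send_msg copies the replied-to message to a single user, sleeping and retrying
# on FloodWait, and returns a (status_code, error_line) pair so the broadcast
# loop can log failures and drop deactivated/blocked/invalid users.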
async def send_msg(user_id, message):
try:
await message.copy(chat_id=user_id)
return 200, None
except FloodWait as e:
await asyncio.sleep(e.x)
return await send_msg(user_id, message)
except InputUserDeactivated:
return 400, f"{user_id} : deactivated\n"
except UserIsBlocked:
return 400, f"{user_id} : user is blocked\n"
except PeerIdInvalid:
return 400, f"{user_id} : user id invalid\n"
except Exception as e:
return 500, f"{user_id} : {traceback.format_exc()}\n"
@Bot.on_callback_query()
async def cb_handler(bot, update):
if update.data == "home":
await update.message.edit_text(
text=START_TEXT.format(update.from_user.mention),
reply_markup=START_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit_text(
text=HELP_TEXT,
reply_markup=HELP_BUTTONS,
disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
reply_markup=ABOUT_BUTTONS,
disable_web_page_preview=True
)
else:
await update.message.delete()
@Bot.on_message(filters.private & filters.command(["start"]))
async def start(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=START_TEXT.format(update.from_user.mention),
disable_web_page_preview=True,
reply_markup=START_BUTTONS
)
@Bot.on_message(filters.private & filters.command(["help"]))
async def help(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=HELP_TEXT,
disable_web_page_preview=True,
reply_markup=HELP_BUTTONS
)
@Bot.on_message(filters.private & filters.command(["about"]))
async def about(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
disable_web_page_preview=True,
reply_markup=ABOUT_BUTTONS
)
@Bot.on_message(filters.media & filters.private)
async def telegraph_upload(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
if UPDATE_CHANNEL:
try:
user = await bot.get_chat_member(UPDATE_CHANNEL, update.chat.id)
if user.status == "kicked":
await update.reply_text(text="You are banned!")
return
except UserNotParticipant:
await update.reply_text(
text=FORCE_SUBSCRIBE_TEXT,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(text="⚙ Join Updates Channel ⚙", url=f"https://telegram.me/{UPDATE_CHANNEL}")]]
)
)
return
except Exception as error:
print(error)
await update.reply_text(text="Something wrong. Contact <a href='https://telegram.me/TharukRenuja'>Developer</a>.", disable_web_page_preview=True)
return
medianame = "./DOWNLOADS/" + "FayasNoushad/FnTelegraphBot"
text = await update.reply_text(
text="<code>Downloading to My Server ...</code>",
disable_web_page_preview=True
)
await bot.download_media(
message=update,
file_name=medianame
)
await text.edit_text(
text="<code>Downloading Completed. Now I am Uploading to telegra.ph Link ...</code>",
disable_web_page_preview=True
)
try:
response = upload_file(medianame)
except Exception as error:
print(error)
await text.edit_text(
text=f"Error :- {error}",
disable_web_page_preview=True
)
return
try:
os.remove(medianame)
except Exception as error:
print(error)
return
await text.edit_text(
text=f"<b>Link :-</b> <code>https://telegra.ph{response[0]}</code>\n\n<b>Join :-</b> @SLBotsOfficial",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(text="Open Link", url=f"https://telegra.ph{response[0]}"),
InlineKeyboardButton(text="Share Link", url=f"https://telegram.me/share/url?url=https://telegra.ph{response[0]}")
],
[InlineKeyboardButton(text="⚙ Join Updates Channel ⚙", url="https://telegram.me/SLBotsOfficial")]
]
)
)
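# broadcast is an owner-only command used as a reply to the message to be sent.
# It iterates over every stored user, counts successes and failures, deletes
# users that returned a 400-style error, and finally reports the totals
# (attaching the log file when any delivery failed).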
@Bot.on_message(filters.private & filters.command("broadcast") & filters.user(BOT_OWNER) & filters.reply)
async def broadcast(bot, update):
broadcast_ids = {}
all_users = await db.get_all_users()
broadcast_msg = update.reply_to_message
while True:
broadcast_id = ''.join([random.choice(string.ascii_letters) for i in range(3)])
if not broadcast_ids.get(broadcast_id):
break
out = await update.reply_text(text=f"Broadcast Started! You will be notified with log file when all the users are notified.")
start_time = time.time()
total_users = await db.total_users_count()
done = 0
failed = 0
success = 0
broadcast_ids[broadcast_id] = dict(total = total_users, current = done, failed = failed, success = success)
async with aiofiles.open('broadcast.txt', 'w') as broadcast_log_file:
async for user in all_users:
sts, msg = await send_msg(user_id = int(user['id']), message = broadcast_msg)
if msg is not None:
await broadcast_log_file.write(msg)
if sts == 200:
success += 1
else:
failed += 1
if sts == 400:
await db.delete_user(user['id'])
done += 1
if broadcast_ids.get(broadcast_id) is None:
break
else:
broadcast_ids[broadcast_id].update(dict(current = done, failed = failed, success = success))
if broadcast_ids.get(broadcast_id):
broadcast_ids.pop(broadcast_id)
completed_in = datetime.timedelta(seconds=int(time.time()-start_time))
await asyncio.sleep(3)
await out.delete()
if failed == 0:
await update.reply_text(text=f"broadcast completed in `{completed_in}`\n\nTotal users {total_users}.\nTotal done {done}, {success} success and {failed} failed.", quote=True)
else:
await update.reply_document(document='broadcast.txt', caption=f"broadcast completed in `{completed_in}`\n\nTotal users {total_users}.\nTotal done {done}, {success} success and {failed} failed.")
os.remove('broadcast.txt')
@Bot.on_message(filters.private & filters.command("status"), group=5)
async def status(bot, update):
total_users = await db.total_users_count()
text = "**Bot Status**\n"
text += f"\n**Total Users:** `{total_users}`"
await update.reply_text(
text=text,
quote=True,
disable_web_page_preview=True
)
Bot.run()
|
[] |
[] |
[
"UPDATE_CHANNEL",
"DATABASE_URL",
"BOT_TOKEN",
"BOT_OWNER",
"API_ID",
"API_HASH"
] |
[]
|
["UPDATE_CHANNEL", "DATABASE_URL", "BOT_TOKEN", "BOT_OWNER", "API_ID", "API_HASH"]
|
python
| 6 | 0 | |
cmd/tail.go
|
/*
Copyright © 2020 @pyama86 [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pyama86/isaka/isaka"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// tailCmd represents the tail command
var tailCmd = &cobra.Command{
Use: "tail",
Short: "tail log from choose topic",
Long: `A tail command for Apache Kafka.
You can set the broker list from Zookeeper or via CLI options.`,
Run: func(cmd *cobra.Command, args []string) {
if err := runTail(); err != nil {
fmt.Println(err)
os.Exit(1)
}
},
}
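// runTail resolves the broker endpoints (from the kafka-brokers option or, when
// it is empty, by asking Zookeeper), opens a reader on the configured topic and
// prints every message; without the follow flag it stops after the requested
// number of lines.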
func runTail() error {
brokerEndpoints := []string{}
if config.KafkaBrokers == "" {
z, err := isaka.NewZookeeper(config.ZookeeperHost, config.ClusterName, time.Duration(config.ZookeeperTimeout)*time.Second)
if err != nil {
return err
}
brokersInfo, err := z.BrokerList()
if err != nil {
return err
}
brokerEndpoints = brokersInfo.BrokerEndpoints(config.Listener)
} else {
brokerEndpoints = strings.Split(config.KafkaBrokers, ",")
}
kafkaBrokers, err := isaka.NewKafkaBrokers(brokerEndpoints, config.KafkaTimeout, config.BrokerCA, config.BrokerCert, config.BrokerKey)
if err != nil {
return err
}
reader, err := kafkaBrokers.Reader(config.Topic, os.Getenv("USER"), config.Tail)
if err != nil {
return err
}
defer reader.Close()
var cnt int64
ctx := context.Background()
for {
ctx, cancel := context.WithTimeout(ctx, time.Duration(config.KafkaTimeout)*time.Second)
defer cancel()
m, err := reader.ReadMessage(ctx)
if err != nil {
return fmt.Errorf("topic=%s err=%s", config.Topic, err.Error())
}
fmt.Fprintln(os.Stdout, string(m.Value))
if !config.Follow {
cnt++
if cnt >= config.Tail {
break
}
}
}
return nil
}
func init() {
tailCmd.PersistentFlags().String("broker-ca", "", "ca cert file(Env:ISAKA_BROKERCA)")
viper.BindPFlag("BrokerCA", tailCmd.PersistentFlags().Lookup("broker-ca"))
tailCmd.PersistentFlags().String("broker-tls-cert", "", "tls cert file(Env:ISAKA_BROKERCERT)")
viper.BindPFlag("BrokerCert", tailCmd.PersistentFlags().Lookup("broker-tls-cert"))
tailCmd.PersistentFlags().String("broker-tls-key", "", "tls key file(Env:ISAKA_BROKERKEY)")
viper.BindPFlag("BrokerKey", tailCmd.PersistentFlags().Lookup("broker-tls-key"))
tailCmd.PersistentFlags().String("kafka-brokers", "", "kafka brokers(Env:ISAKA_BROKERS)")
viper.BindPFlag("KafkaBrokers", tailCmd.PersistentFlags().Lookup("kafka-brokers"))
tailCmd.PersistentFlags().StringP("topic", "t", "", "subscribe topic")
viper.BindPFlag("Topic", tailCmd.PersistentFlags().Lookup("topic"))
tailCmd.PersistentFlags().Int64P("tail", "n", 20, "tail line")
viper.BindPFlag("Tail", tailCmd.PersistentFlags().Lookup("tail"))
tailCmd.PersistentFlags().BoolP("follow", "f", false, "follow input")
viper.BindPFlag("Follow", tailCmd.PersistentFlags().Lookup("follow"))
tailCmd.PersistentFlags().StringP("listener", "l", "PLAINTEXT", "choose listener")
viper.BindPFlag("Listener", tailCmd.PersistentFlags().Lookup("listener"))
rootCmd.PersistentFlags().Int("kafka-timeout", 10, "kafka timeout")
viper.BindPFlag("KafkaTimeout", rootCmd.PersistentFlags().Lookup("kafka-timeout"))
rootCmd.AddCommand(tailCmd)
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
src/plugins.py
|
#!/usr/bin/python3
# Plugins updater based on plugins list defined in configs/plugins.list file
# Original version was created by PaulWebbster
# https://github.com/PaulWebbster/vim-configuration
import sys
import os
from git import Repo
import re
class PluginsManager(object):
"""Manages the connection with the synchronization copy in remote git
repository. """
def __init__(self, vimhome=os.getenv('HOME') + '/.vim'):
"""
__init__
:param vimhome: the path to user .vim directory
"""
self.vimhome = vimhome
self.plugins_list = []
self.mgit = GitManager(self.vimhome)
def update_plugins_list(self, vimrc=os.getenv('HOME') + '/.vimrc'):
"""
update_plugins_list: updates the object plugins list based on plugin
list in vimrc file.
:param vimrc: path to .vimrc file
"""
with open(vimrc, 'r') as fvimrc:
ppta = re.compile("NeoBundle '([A-Za-z0-9\/\-\.]+)'")
for line in fvimrc:
if "NeoBundle '" in line:
self.plugins_list.append(ppta.match(line).group(1))
def save_plugins_config(self):
"""
save_plugins_config: updates the config file plugins.list with
plugins added manually by the user in their .vimrc file
"""
with open(self.vimhome + '/configs/plugins.list', 'w+') as pluglist:
for plugin in self.plugins_list:
if not self.check_if_plugin_in_config(plugin):
pluglist.write("'{}'\n".format(plugin))
print('Plugin config list updated...')
def check_if_plugin_in_config(self, plugin):
with open(self.vimhome + '/configs/plugins.list', 'r') as plist:
for plug in plist:
if plugin in plug:
return True
return False
def save_plugins_configuration(self, gitinfo=None):
"""save_plugins_configuration: saves plugins information in
plugin_config file and sends it to git
:param gitinfo: Custom git info
"""
if not gitinfo:
gitinfo = """
"""
def update_vimrc_plugis(self):
with open(self.vimhome + '/configs/plugins.list', 'r') as plist:
plugins = ["NeoBundle " + plugin for plugin in plist]
with open(sys.argv[1] + '/configs/vimrc', 'r') as fvimrc:
vimrc = []
for index, line in enumerate(fvimrc):
vimrc.append(line)
if line.find('Plugins') != -1:
pline = index + 1
vimrc = vimrc[:pline]+plugins+vimrc[pline:]
with open(os.getenv("HOME") + '/.vimrc', 'w') as fvimrc:
fvimrc.writelines(vimrc)
class GitManager(object):
"""Manages the connection with the synchronization copy in remote git
repository. Automates committing changes to the repository of plugins, changes
in vim configuration files and the vim user directory. Uses the GitPython
framework for repository connections."""
def __init__(self, repopath):
self.repo = Repo(repopath)
def commit(self, message=None, files=None):
self.repo
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
pkg/config/config_linux_test.go
|
// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package config
import (
"io/ioutil"
"os"
. "gopkg.in/check.v1"
)
func (s *ConfigSuite) TestParseConfigOverride(c *C) {
config := `
compaction_threshold: 54
daemontools_refresh_sec: 32
verbose: 1
ignored_inventory:
- files/config/stuff.bar
- files/config/stuff.foo
license_key: abc123
custom_attributes:
my_group: test group
agent_role: test role
debug: false
overide_host_root: /dockerland
is_containerized: false
`
f, err := ioutil.TempFile("", "opsmatic_config_test_2")
c.Assert(err, IsNil)
_, _ = f.WriteString(config)
_ = f.Close()
cfg, err := LoadConfig(f.Name())
c.Assert(err, IsNil)
c.Assert(os.Getenv("HOST_ETC"), Equals, "/dockerland/etc")
c.Assert(cfg.IsContainerized, Equals, false)
c.Assert(cfg.IsForwardOnly, Equals, false)
c.Assert(cfg.IsSecureForwardOnly, Equals, false)
_ = os.Setenv("NRIA_LICENSE_KEY", "abcd1234")
_ = os.Setenv("NRIA_COMPACTION_THRESHOLD", "55")
_ = os.Setenv("NRIA_DAEMONTOOLS_INTERVAL_SEC", "33")
_ = os.Setenv("NRIA_VERBOSE", "0")
_ = os.Setenv("NRIA_DEBUG", "false")
_ = os.Setenv("NRIA_IGNORED_INVENTORY", "files/config/things.bar,files/config/things.foo")
_ = os.Setenv("NRIA_CUSTOM_ATTRIBUTES",
`{"my_groups":"testing group", "agent_roles":"testing role"}`)
_ = os.Setenv("NRIA_OVERRIDE_HOST_ETC", "/opt/etc")
_ = os.Setenv("NRIA_OVERRIDE_HOST_PROC", "/docker_proc")
_ = os.Setenv("NRIA_OVERRIDE_HOST_ROOT", "/dockerworld")
_ = os.Setenv("NRIA_OVERRIDE_HOST_SYS", "/docker_sys")
_ = os.Setenv("NRIA_IS_CONTAINERIZED", "true")
_ = os.Setenv("NRIA_IS_FORWARD_ONLY", "true")
_ = os.Setenv("NRIA_IS_SECURE_FORWARD_ONLY", "true")
defer func() {
_ = os.Unsetenv("NRIA_LICENSE_KEY")
_ = os.Unsetenv("NRIA_COMPACTION_THRESHOLD")
_ = os.Unsetenv("NRIA_DAEMONTOOLS_REFRESH_SEC")
_ = os.Unsetenv("NRIA_VERBOSE")
_ = os.Unsetenv("NRIA_DEBUG")
_ = os.Unsetenv("NRIA_IGNORED_INVENTORY")
_ = os.Unsetenv("NRIA_CUSTOM_ATTRIBUTES")
_ = os.Unsetenv("NRIA_OVERRIDE_HOST_ETC")
_ = os.Unsetenv("NRIA_OVERRIDE_HOST_PROC")
_ = os.Unsetenv("NRIA_OVERRIDE_HOST_ROOT")
_ = os.Unsetenv("NRIA_OVERRIDE_HOST_SYS")
_ = os.Unsetenv("NRIA_IS_CONTAINERIZED")
_ = os.Unsetenv("NRIA_IS_FORWARD_ONLY")
_ = os.Unsetenv("NRIA_IS_SECURE_FORWARD_ONLY")
_ = os.Unsetenv("HOST_SYS")
_ = os.Unsetenv("HOST_ETC")
_ = os.Unsetenv("HOST_PROC")
}()
configOverride(cfg)
c.Log(cfg.CustomAttributes)
c.Assert(cfg.License, Equals, "abcd1234")
c.Assert(cfg.CompactThreshold, Equals, uint64(55))
c.Assert(cfg.DaemontoolsRefreshSec, Equals, int64(33))
c.Assert(cfg.Verbose, Equals, 0)
c.Assert(cfg.Debug, Equals, false)
c.Assert(cfg.IgnoredInventoryPaths, DeepEquals, []string{"files/config/things.bar", "files/config/things.foo"})
c.Assert(cfg.CustomAttributes, DeepEquals, CustomAttributeMap{
"my_groups": "testing group",
"agent_roles": "testing role",
})
c.Assert(cfg.OverrideHostSys, Equals, "/dockerworld/docker_sys")
c.Assert(cfg.OverrideHostProc, Equals, "/dockerworld/docker_proc")
c.Assert(cfg.OverrideHostEtc, Equals, "/dockerworld/opt/etc")
c.Assert(os.Getenv("HOST_ETC"), Equals, "/dockerworld/opt/etc")
c.Assert(os.Getenv("HOST_PROC"), Equals, "/dockerworld/docker_proc")
c.Assert(os.Getenv("HOST_SYS"), Equals, "/dockerworld/docker_sys")
c.Assert(cfg.IsContainerized, Equals, true)
c.Assert(cfg.IsForwardOnly, Equals, true)
c.Assert(cfg.IsSecureForwardOnly, Equals, true)
}
|
[
"\"HOST_ETC\"",
"\"HOST_ETC\"",
"\"HOST_PROC\"",
"\"HOST_SYS\""
] |
[] |
[
"HOST_SYS",
"HOST_PROC",
"HOST_ETC"
] |
[]
|
["HOST_SYS", "HOST_PROC", "HOST_ETC"]
|
go
| 3 | 0 | |
.circleci/get_workflow_name.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Get workflow name for the current build using CircleCI API.
It would be great if this information were available in one of the
CircleCI environment variables, but it's not there.
https://circleci.ideas.aha.io/ideas/CCI-I-295
"""
import json
import os
import sys
import urllib2
def main():
try:
username = os.environ['CIRCLE_PROJECT_USERNAME']
reponame = os.environ['CIRCLE_PROJECT_REPONAME']
build_num = os.environ['CIRCLE_BUILD_NUM']
except:
sys.stderr.write(
'Looks like we are not inside CircleCI container. Exiting...\n')
return 1
try:
request = urllib2.Request(
"https://circleci.com/api/v1.1/project/github/%s/%s/%s" %
(username, reponame, build_num),
headers={"Accept": "application/json"})
contents = urllib2.urlopen(request).read()
except:
sys.stderr.write('Cannot query CircleCI API. Exiting...\n')
return 1
try:
build_info = json.loads(contents)
except:
sys.stderr.write(
'Cannot parse JSON received from CircleCI API. Exiting...\n')
return 1
try:
workflow_name = build_info['workflows']['workflow_name']
except:
sys.stderr.write(
'Cannot get workflow name from CircleCI build info. Exiting...\n')
return 1
print workflow_name
return 0
retval = main()
exit(retval)
|
[] |
[] |
[
"CIRCLE_PROJECT_REPONAME",
"CIRCLE_BUILD_NUM",
"CIRCLE_PROJECT_USERNAME"
] |
[]
|
["CIRCLE_PROJECT_REPONAME", "CIRCLE_BUILD_NUM", "CIRCLE_PROJECT_USERNAME"]
|
python
| 3 | 0 | |
core/main.go
|
package autospotting
import (
"io/ioutil"
"log"
"os"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)
var logger, debug *log.Logger
// Run starts processing all AWS regions, looking for enabled AutoScaling
// groups and taking action by replacing their pricier on-demand instances
// with compatible and cheaper spot instances.
func Run(cfg Config) {
setupLogging(cfg)
debug.Println(cfg)
// use this only to list all the other regions
ec2Conn := connectEC2(cfg.MainRegion)
allRegions, err := getRegions(ec2Conn)
if err != nil {
logger.Println(err.Error())
return
}
processRegions(allRegions, cfg)
}
func disableLogging() {
setupLogging(Config{LogFile: ioutil.Discard})
}
func setupLogging(cfg Config) {
logger = log.New(cfg.LogFile, "", cfg.LogFlag)
if os.Getenv("AUTOSPOTTING_DEBUG") == "true" {
debug = log.New(cfg.LogFile, "", cfg.LogFlag)
} else {
debug = log.New(ioutil.Discard, "", 0)
}
}
// processRegions iterates all regions in parallel, and replaces instances
// for each of the ASGs tagged with 'spot-enabled=true'.
func processRegions(regions []string, cfg Config) {
var wg sync.WaitGroup
for _, r := range regions {
wg.Add(1)
r := region{name: r, conf: cfg}
go func() {
if r.enabled() {
logger.Printf("Enabled to run in %s, processing region.\n", r.name)
r.processRegion()
} else {
debug.Println("Not enabled to run in", r.name)
debug.Println("List of enabled regions:", cfg.Regions)
}
wg.Done()
}()
}
wg.Wait()
}
func connectEC2(region string) *ec2.EC2 {
sess, err := session.NewSession()
if err != nil {
panic(err)
}
return ec2.New(sess,
aws.NewConfig().WithRegion(region))
}
// getRegions generates a list of AWS regions.
func getRegions(ec2conn ec2iface.EC2API) ([]string, error) {
var output []string
logger.Println("Scanning for available AWS regions")
resp, err := ec2conn.DescribeRegions(&ec2.DescribeRegionsInput{})
if err != nil {
logger.Println(err.Error())
return nil, err
}
debug.Println(resp)
for _, r := range resp.Regions {
if r != nil && r.RegionName != nil {
debug.Println("Found region", *r.RegionName)
output = append(output, *r.RegionName)
}
}
return output, nil
}
|
[
"\"AUTOSPOTTING_DEBUG\""
] |
[] |
[
"AUTOSPOTTING_DEBUG"
] |
[]
|
["AUTOSPOTTING_DEBUG"]
|
go
| 1 | 0 | |
setup_build.py
|
#!/usr/bin/env python3
"""
Implements a custom Distutils build_ext replacement, which handles the
full extension module build process, from api_gen to C compilation and
linking.
"""
try:
from setuptools import Extension
except ImportError:
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
import sys
import os
import os.path as op
from pathlib import Path
import subprocess
import api_gen
from setup_configure import BuildConfig
def localpath(*args):
return op.abspath(op.join(op.dirname(__file__), *args))
MODULES = ['defs', '_errors', '_objects', '_proxy', 'h5fd', 'h5z',
'h5', 'h5i', 'h5r', 'utils', '_selector',
'_conv', 'h5t', 'h5s',
'h5p',
'h5d', 'h5a', 'h5f', 'h5g',
'h5l', 'h5o',
'h5ds', 'h5ac',
'h5pl']
EXTRA_SRC = {'h5z': [ localpath("lzf/lzf_filter.c"),
localpath("lzf/lzf/lzf_c.c"),
localpath("lzf/lzf/lzf_d.c")]}
COMPILER_SETTINGS = {
'libraries' : ['hdf5', 'hdf5_hl'],
'include_dirs' : [localpath('lzf')],
'library_dirs' : [],
'define_macros' : [('H5_USE_18_API', None),
('NPY_NO_DEPRECATED_API', 0),
]
}
if sys.platform.startswith('win'):
COMPILER_SETTINGS['include_dirs'].append(localpath('windows'))
COMPILER_SETTINGS['define_macros'].extend([
('_HDF5USEDLL_', None),
('H5_BUILT_AS_DYNAMIC_LIB', None)
])
class h5py_build_ext(build_ext):
"""
Custom distutils command which encapsulates api_gen pre-building,
Cython building, and C compilation.
Also handles making the Extension modules, since we can't rely on
NumPy being present in the main body of the setup script.
"""
@staticmethod
def _make_extensions(config):
""" Produce a list of Extension instances which can be passed to
cythonize().
This is the point at which custom directories, MPI options, etc.
enter the build process.
"""
import numpy
settings = COMPILER_SETTINGS.copy()
settings['include_dirs'][:0] = config.hdf5_includedirs
settings['library_dirs'][:0] = config.hdf5_libdirs
settings['define_macros'].extend(config.hdf5_define_macros)
try:
numpy_includes = numpy.get_include()
except AttributeError:
# if numpy is not installed get the headers from the .egg directory
import numpy.core
numpy_includes = os.path.join(os.path.dirname(numpy.core.__file__), 'include')
settings['include_dirs'] += [numpy_includes]
if config.mpi:
import mpi4py
settings['include_dirs'] += [mpi4py.get_include()]
# TODO: should this only be done on UNIX?
if os.name != 'nt':
settings['runtime_library_dirs'] = settings['library_dirs']
def make_extension(module):
sources = [localpath('h5py', module + '.pyx')] + EXTRA_SRC.get(module, [])
return Extension('h5py.' + module, sources, **settings)
return [make_extension(m) for m in MODULES]
def run(self):
""" Distutils calls this method to run the command """
from Cython import __version__ as cython_version
from Cython.Build import cythonize
import numpy
# This allows ccache to recognise the files when pip builds in a temp
# directory. It speeds up repeatedly running tests through tox with
# ccache configured (CC="ccache gcc"). It should have no effect if
# ccache is not in use.
os.environ['CCACHE_BASEDIR'] = op.dirname(op.abspath(__file__))
os.environ['CCACHE_NOHASHDIR'] = '1'
# Get configuration from environment variables
config = BuildConfig.from_env()
config.summarise()
defs_file = localpath('h5py', 'defs.pyx')
func_file = localpath('h5py', 'api_functions.txt')
config_file = localpath('h5py', 'config.pxi')
# Rebuild low-level defs if missing or stale
if not op.isfile(defs_file) or os.stat(func_file).st_mtime > os.stat(defs_file).st_mtime:
print("Executing api_gen rebuild of defs")
api_gen.run()
# Rewrite config.pxi file if needed
s = """\
# This file is automatically generated by the h5py setup script. Don't modify.
DEF MPI = %(mpi)s
DEF HDF5_VERSION = %(version)s
DEF SWMR_MIN_HDF5_VERSION = (1,9,178)
DEF VDS_MIN_HDF5_VERSION = (1,9,233)
DEF VOL_MIN_HDF5_VERSION = (1,11,5)
DEF COMPLEX256_SUPPORT = %(complex256_support)s
DEF NUMPY_BUILD_VERSION = '%(numpy_version)s'
DEF CYTHON_BUILD_VERSION = '%(cython_version)s'
"""
s %= {
'mpi': bool(config.mpi),
'version': config.hdf5_version,
'complex256_support': hasattr(numpy, 'complex256'),
'numpy_version': numpy.__version__,
'cython_version': cython_version,
}
write_if_changed(config_file, s)
# Run Cython
print("Executing cythonize()")
self.extensions = cythonize(self._make_extensions(config),
force=config.changed() or self.force,
language_level=3)
# Perform the build
build_ext.run(self)
# Record the configuration we built
config.record_built()
def write_if_changed(target_path, s: str):
"""Overwrite target_path unless the contents already match s
Avoids changing the mtime when we're just writing the same data.
"""
p = Path(target_path)
b = s.encode('utf-8')
try:
if p.read_bytes() == b:
return
except FileNotFoundError:
pass
p.write_bytes(b)
|
[] |
[] |
[
"CCACHE_BASEDIR",
"CCACHE_NOHASHDIR"
] |
[]
|
["CCACHE_BASEDIR", "CCACHE_NOHASHDIR"]
|
python
| 2 | 0 | |
bryansWebsite/wsgi.py
|
"""
WSGI config for bryansWebsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bryansWebsite.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
expvars.go
|
package main
import (
"errors"
"io"
"net/http"
"net/url"
"os"
"time"
"github.com/antonholmquist/jason"
)
// DefaultEndpoint is the default url for fetching expvar info.
var DefaultEndpoint = "/debug/vars"
// Expvar represents fetched expvar variable.
type Expvar struct {
*jason.Object
}
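// getBasicAuthEnv reads optional HTTP basic-auth credentials from the
// HTTP_USER and HTTP_PASSWORD environment variables.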
func getBasicAuthEnv() (user, password string) {
return os.Getenv("HTTP_USER"), os.Getenv("HTTP_PASSWORD")
}
// FetchExpvar fetches expvar by http for the given addr (host:port)
func FetchExpvar(u url.URL) (*Expvar, error) {
e := &Expvar{&jason.Object{}}
client := &http.Client{
Timeout: 1 * time.Second, // TODO: make it configurable or leave the default?
}
req, _ := http.NewRequest("GET", "localhost", nil)
req.URL = &u
req.Host = u.Host
if user, pass := getBasicAuthEnv(); user != "" && pass != "" {
req.SetBasicAuth(user, pass)
}
resp, err := client.Do(req)
if err != nil {
return e, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return e, errors.New("Vars not found. Did you import expvars?")
}
expvar, err := ParseExpvar(resp.Body)
if err != nil {
return e, err
}
e = expvar
//fmt.Printf("%#v\n", *(e.Object))
return e, nil
}
// ParseExpvar parses expvar data from reader.
func ParseExpvar(r io.Reader) (*Expvar, error) {
object, err := jason.NewObjectFromReader(r)
return &Expvar{object}, err
}
|
[
"\"HTTP_USER\"",
"\"HTTP_PASSWORD\""
] |
[] |
[
"HTTP_USER",
"HTTP_PASSWORD"
] |
[]
|
["HTTP_USER", "HTTP_PASSWORD"]
|
go
| 2 | 0 | |
test/integration/test_cli_utils.py
|
import elasticsearch
import curator
import os
import click
from click import testing as clicktest
from mock import patch, Mock
from . import CuratorTestCase
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200
class TestGetClient(CuratorTestCase):
def test_get_client_positive(self):
client_args = {"host":host, "port":port}
client = curator.get_client(**client_args)
self.assertTrue(isinstance(client, elasticsearch.client.Elasticsearch))
def test_get_client_negative_connection_fail(self):
client_args = {"host":host, "port":54321}
with self.assertRaises(SystemExit) as cm:
curator.get_client(**client_args)
self.assertEqual(cm.exception.code, 1)
class TestCLIUtilsFilterCallback(CuratorTestCase):
def test_filter_callback_without_timestring(self):
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--logfile', os.devnull,
'--host', host,
'--port', str(port),
'show',
'indices',
'--older-than', '5',
'--time-unit', 'days',
],
obj={"filters":[]})
self.assertEqual(1, result.exit_code)
def test_filter_callback_without_timeunit(self):
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--logfile', os.devnull,
'--host', host,
'--port', str(port),
'show',
'indices',
'--newer-than', '5',
'--timestring', '%Y.%m.%d',
],
obj={"filters":[]})
self.assertEqual(1, result.exit_code)
|
[] |
[] |
[
"TEST_ES_SERVER"
] |
[]
|
["TEST_ES_SERVER"]
|
python
| 1 | 0 | |
Zoocmd/web/zooapi/views/task.py
|
# -*- coding: utf-8 -*-
from web.helpers.http import HttpResponseNotAllowed, get_object_or_404
from web.helpers.json import json_response, json_request, json_response_raw
from core.task_manager import TaskManager
from core.job import Job
from core.models.database import JobPeewee as JobDataModel
from core.helpers.common import kill_proc_tree
# using
@json_response_raw
def task_paging(request):
"""
Get the list of tasks
:param request:
:return:
"""
# task_paging/list/?sort=created&order=desc&limit=100&offset=0"
# ?limit=10&offset=10&order=asc
sorting = JobDataModel.id.desc()
if 'sort' in request.GET:
sorting_value = request.GET['sort']
if sorting_value == 'created':
if 'order' in request.GET and request.GET['order'] == 'asc':
sorting = JobDataModel.created.asc()
else:
sorting = JobDataModel.created.desc()
if sorting_value == 'title':
if 'order' in request.GET and request.GET['order'] == 'asc':
sorting = JobDataModel.title.asc()
else:
sorting = JobDataModel.title.desc()
if sorting_value == 'command':
if 'order' in request.GET and request.GET['order'] == 'asc':
sorting = JobDataModel.command.asc()
else:
sorting = JobDataModel.command.desc()
if sorting_value == 'status':
if 'order' in request.GET and request.GET['order'] == 'asc':
sorting = JobDataModel.status.asc()
else:
sorting = JobDataModel.status.desc()
limit = int(request.GET['limit'])
offset = int(request.GET['offset'])
rows = None
if 'search' in request.GET and len(request.GET['search'])>3:
search_string = request.GET['search']
rows = [t.to_dict() for t in JobDataModel.select().order_by(sorting).where(JobDataModel.title.contains(search_string)).offset(offset).limit(limit) ]
count = JobDataModel.select().where(JobDataModel.title.contains(search_string)).count()
return {"total": count, "rows": rows}
else:
rows = [t.to_dict() for t in JobDataModel.select().order_by(sorting).offset(offset).limit(limit) ]
count = JobDataModel.select().count()
return {"total": count, "rows": rows}
# using
@json_response
def task_list(request):
"""
Get the list of tasks
:param request:
:return:
"""
return [t.to_dict() for t in JobDataModel.select().order_by(JobDataModel.id.desc())]
# not used
@json_response
def task(request, task_id):
"""
Get a specific task by task_id
:param request:
:param task_id:
:return:
"""
t = get_object_or_404(JobDataModel, id=task_id)
return t.to_dict()
# cancel running job
@json_response
def cancel_task(request, job_id):
"""
Cancel a specific task by job_id
:param request:
:param job_id:
:return:
"""
# TODO: check whether the task is still alive
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
t = get_object_or_404(JobDataModel, id=job_id)
t.status = t.STATUS_CANCELED
t.save()
if t.pid:
kill_proc_tree(t.pid)
return {"status": True}
# not used
@json_response
def rerun(request, job_id):
"""
Re-run a specific task by job_id
:param request:
:param job_id:
:return:
"""
# TODO: rewrite this, there is no such TaskManager anymore
t = get_object_or_404(JobDataModel, id=job_id)
task_manager = TaskManager.get_instance()
task_manager.rerun_task(t)
return {'task': t.to_dict()}
# USING
@json_response
def log(request, job_id):
"""
Get the logs of a specific task by its id
from the database.
If the task is still running, the logs may keep growing.
:param request:
:param job_id:
:return:
"""
since = request.GET['since']
t = get_object_or_404(JobDataModel, id=job_id)
resp = {
'task': t.to_dict(),
'log_messages': [lm.to_dict() for lm in t.get_logs(since)]
}
return resp
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
sdk/management/samples/src/main/java/com/azure/management/appservice/samples/ManageFunctionAppLogs.java
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.management.appservice.samples;
import com.azure.core.http.policy.HttpLogDetailLevel;
import com.azure.management.Azure;
import com.azure.management.appservice.FunctionApp;
import com.azure.management.resources.fluentcore.arm.Region;
import com.azure.management.resources.fluentcore.utils.SdkContext;
import com.azure.management.samples.Utils;
import org.apache.commons.lang.time.StopWatch;
import reactor.core.publisher.BaseSubscriber;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Azure App Service basic sample for managing function apps.
* - Create a function app under the same new app service plan:
* - Deploy to app using FTP
* - stream logs synchronously for up to 90 seconds
* - stream logs asynchronously until 3 requests are completed
*/
public final class ManageFunctionAppLogs {
/**
* Main function which runs the actual sample.
* @param azure instance of the azure client
* @return true if sample runs successfully
*/
public static boolean runSample(Azure azure) {
// New resources
final String suffix = ".azurewebsites.net";
final String appName = azure.sdkContext().randomResourceName("webapp1-", 20);
final String appUrl = appName + suffix;
final String rgName = azure.sdkContext().randomResourceName("rg1NEMV_", 24);
try {
//============================================================
// Create a function app with a new app service plan
System.out.println("Creating function app " + appName + " in resource group " + rgName + "...");
FunctionApp app = azure.appServices().functionApps().define(appName)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName)
.defineDiagnosticLogsConfiguration()
.withApplicationLogging()
.withLogLevel(com.azure.management.appservice.LogLevel.VERBOSE)
.withApplicationLogsStoredOnFileSystem()
.attach()
.create();
System.out.println("Created function app " + app.name());
Utils.print(app);
//============================================================
// Deploy to app 1 through FTP
System.out.println("Deploying a function app to " + appName + " through FTP...");
Utils.uploadFileToFunctionApp(app.getPublishingProfile(), "host.json", ManageFunctionAppLogs.class.getResourceAsStream("/square-function-app/host.json"));
Utils.uploadFileToFunctionApp(app.getPublishingProfile(), "square/function.json", ManageFunctionAppLogs.class.getResourceAsStream("/square-function-app/square/function.json"));
Utils.uploadFileToFunctionApp(app.getPublishingProfile(), "square/index.js", ManageFunctionAppLogs.class.getResourceAsStream("/square-function-app/square/index.js"));
// sync triggers
app.syncTriggers();
System.out.println("Deployment square app to function app " + app.name() + " completed");
Utils.print(app);
// warm up
System.out.println("Warming up " + appUrl + "/api/square...");
Utils.post("http://" + appUrl + "/api/square", "625");
SdkContext.sleep(5000);
//============================================================
// Listen to logs synchronously for up to 90 seconds
final InputStream stream = app.streamApplicationLogs();
System.out.println("Streaming logs from function app " + appName + "...");
String line = readLine(stream);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
new Thread(() -> {
Utils.post("http://" + appUrl + "/api/square", "625");
SdkContext.sleep(10000);
Utils.post("http://" + appUrl + "/api/square", "725");
SdkContext.sleep(10000);
Utils.post("http://" + appUrl + "/api/square", "825");
}).start();
while (line != null && stopWatch.getTime() < 90000) {
System.out.println(line);
line = readLine(stream);
}
stream.close();
//============================================================
// Listen to logs asynchronously until 3 requests are completed
new Thread(() -> {
SdkContext.sleep(5000);
System.out.println("Starting hitting");
Utils.post("http://" + appUrl + "/api/square", "625");
SdkContext.sleep(10000);
Utils.post("http://" + appUrl + "/api/square", "725");
SdkContext.sleep(10000);
Utils.post("http://" + appUrl + "/api/square", "825");
}).start();
final AtomicInteger count = new AtomicInteger(0);
app.streamApplicationLogsAsync().subscribe(new BaseSubscriber<String>() {
@Override
protected void hookOnNext(String value) {
System.out.println(value);
if (value.contains("Function completed")) {
if (count.incrementAndGet() >= 3) {
this.dispose();
}
}
super.hookOnNext(value);
}
@Override
protected void hookOnError(Throwable throwable) {
throwable.printStackTrace();
super.hookOnError(throwable);
}
});
return true;
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
} finally {
try {
System.out.println("Deleting Resource Group: " + rgName);
azure.resourceGroups().beginDeleteByName(rgName);
System.out.println("Deleted Resource Group: " + rgName);
} catch (NullPointerException npe) {
System.out.println("Did not create any resources in Azure. No clean up is necessary");
} catch (Exception g) {
g.printStackTrace();
}
}
return false;
}
/**
* Main entry point.
* @param args the parameters
*/
public static void main(String[] args) {
try {
//=============================================================
// Authenticate
final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION"));
Azure azure = Azure
.configure()
.withLogLevel(HttpLogDetailLevel.BASIC)
.authenticate(credFile)
.withDefaultSubscription();
// Print selected subscription
System.out.println("Selected subscription: " + azure.subscriptionId());
runSample(azure);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
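// Reads a single line (up to '\n') from the stream; returns null at end of stream.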
private static String readLine(InputStream in) throws IOException {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
int c;
for (c = in.read(); c != '\n' && c >= 0; c = in.read()) {
stream.write(c);
}
if (c == -1 && stream.size() == 0) {
return null;
}
return stream.toString("UTF-8");
}
}
|
[
"\"AZURE_AUTH_LOCATION\""
] |
[] |
[
"AZURE_AUTH_LOCATION"
] |
[]
|
["AZURE_AUTH_LOCATION"]
|
java
| 1 | 0 | |
main.go
|
// Copyright 2016-2018 Yubico AB
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net/http"
"os"
"os/signal"
"path"
"regexp"
"runtime"
"strings"
"syscall"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/kardianos/service"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
// Host header whitelisting
hostHeaderWhitelisting bool
hostHeaderWhitelist = []string{"localhost", "localhost.", "127.0.0.1", "[::1]"}
)
type program struct {
srv *http.Server
}
func (p *program) Start(s service.Service) error {
addr := viper.GetString("listen")
p.srv = &http.Server{Addr: addr}
timeout := timeoutToMs(viper.GetUint32("timeout"))
serial, _ := ensureSerial(viper.GetString("serial")) // already validated by Cobra
http.HandleFunc("/connector/status", middlewareWrapper(func(w http.ResponseWriter, r *http.Request) {
statusHandler(w, r, timeout, serial)
}))
http.HandleFunc("/connector/api", middlewareWrapper(func(w http.ResponseWriter, r *http.Request) {
apiHandler(w, r, timeout, serial)
}))
if viper.GetBool("seccomp") {
log.Warn("seccomp support has been deprecated and the flag will be removed in future versions")
}
tls := false
cert := viper.GetString("cert")
key := viper.GetString("key")
if cert != "" && key != "" {
tls = true
}
log.WithFields(log.Fields{
"pid": os.Getpid(),
"listen": addr,
"TLS": tls,
}).Debug("takeoff")
go func(tls bool) {
if tls {
if err := p.srv.ListenAndServeTLS(cert, key); err != nil {
log.Printf("ListenAndServeTLS failure: %s", err)
}
} else {
if err := p.srv.ListenAndServe(); err != nil {
log.Printf("ListenAndServe failure: %s", err)
}
}
}(tls)
return nil
}
func (p *program) Stop(s service.Service) error {
return p.srv.Shutdown(nil)
}
//go:generate go run -mod=vendor version.in.go
func main() {
loggingInit(service.Interactive())
if !service.Interactive() {
if runtime.GOOS == "windows" {
viper.AddConfigPath(path.Join(os.Getenv("ProgramData"), "YubiHSM"))
} else {
// These paths will work for most UNIXy platforms. macOS may need something else.
configPaths := [2]string{"/etc", "/usr/local/etc"}
for _, configPath := range configPaths {
viper.AddConfigPath(path.Join(configPath, "yubihsm"))
}
}
}
svcConfig := &service.Config{
Name: "yhconsrv",
DisplayName: "YubiHSM Connector Service",
Description: "Implements the http-usb interface for the YubiHSM",
}
prg := &program{}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal(err)
return
}
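// Trap SIGINT/SIGTERM so the USB handle can be released before the process exits.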
signalChannel := make(chan os.Signal, 1)
signal.Notify(signalChannel, syscall.SIGINT, syscall.SIGTERM)
go func() {
signalEncountered := <-signalChannel
log.Info("Shutting down.")
// Put any process wide shutdown calls here
usbclose("Process terminate")
signal.Reset(signalEncountered)
os.Exit(0)
}()
rootCmd := &cobra.Command{
Use: "yubihsm-connector",
Long: `YubiHSM Connector v` + Version.String(),
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if viper.GetBool("debug") {
log.SetLevel(log.DebugLevel)
}
config := viper.GetString("config")
if config != "" {
viper.SetConfigFile(config)
}
},
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
if err = viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
return err
}
}
certkeyErr := fmt.Errorf("cert and key must both be specified")
if viper.GetString("cert") != "" && viper.GetString("key") == "" {
return certkeyErr
} else if viper.GetString("cert") == "" && viper.GetString("key") != "" {
return certkeyErr
}
serial, err := ensureSerial(viper.GetString("serial"))
if err != nil {
return err
}
log.WithFields(log.Fields{
"config": viper.ConfigFileUsed(),
"pid": os.Getpid(),
"seccomp": viper.GetBool("seccomp"),
"syslog": viper.GetBool("syslog"),
"version": Version.String(),
"cert": viper.GetString("cert"),
"key": viper.GetString("key"),
"timeout": timeoutToMs(viper.GetUint32("timeout")),
"serial": serial,
}).Debug("preflight complete")
return nil
},
RunE: func(cmd *cobra.Command, args []string) (err error) {
return s.Run()
},
}
rootCmd.PersistentFlags().StringP("config", "c", "", "config file")
viper.BindPFlag("config", rootCmd.PersistentFlags().Lookup("config"))
rootCmd.PersistentFlags().BoolP("debug", "d", false, "debug output")
viper.BindPFlag("debug", rootCmd.PersistentFlags().Lookup("debug"))
rootCmd.PersistentFlags().BoolP("seccomp", "s", false, "enable seccomp")
viper.BindPFlag("seccomp", rootCmd.PersistentFlags().Lookup("seccomp"))
rootCmd.PersistentFlags().StringP("cert", "", "", "certificate (X509)")
viper.BindPFlag("cert", rootCmd.PersistentFlags().Lookup("cert"))
rootCmd.PersistentFlags().StringP("key", "", "", "certificate key")
viper.BindPFlag("key", rootCmd.PersistentFlags().Lookup("key"))
rootCmd.PersistentFlags().StringP("serial", "", "", "device serial")
viper.BindPFlag("serial", rootCmd.PersistentFlags().Lookup("serial"))
rootCmd.PersistentFlags().StringP("listen", "l", "localhost:12345", "listen address")
viper.BindPFlag("listen", rootCmd.PersistentFlags().Lookup("listen"))
rootCmd.PersistentFlags().BoolP("syslog", "L", false, "log to syslog/eventlog")
viper.BindPFlag("syslog", rootCmd.PersistentFlags().Lookup("syslog"))
rootCmd.PersistentFlags().BoolVar(&hostHeaderWhitelisting, "enable-host-header-whitelist", false, "Enable Host header whitelisting")
viper.BindPFlag("enable-host-whitelist", rootCmd.PersistentFlags().Lookup("enable-host-header-whitelist"))
rootCmd.PersistentFlags().StringSliceVar(&hostHeaderWhitelist, "host-header-whitelist", hostHeaderWhitelist, "Host header whitelist")
viper.BindPFlag("host-whitelist", rootCmd.PersistentFlags().Lookup("host-header-whitelist"))
rootCmd.PersistentFlags().Uint32P("timeout", "t", 0, "USB operation timeout in milliseconds (default 0, never timeout)")
viper.BindPFlag("timeout", rootCmd.PersistentFlags().Lookup("timeout"))
configCmd := &cobra.Command{
Use: "config",
Long: `YubiHSM Connector configuration
Most configuration knobs for the connector are not available at the command
line, and must be supplied via a configuration file.
listen: localhost:12345
syslog: false
cert: /path/to/certificate.crt
key: /path/to/certificate.key
serial: 0123456789
`,
}
configCheckCmd := &cobra.Command{
Use: "check",
Long: `Syntax check configuration`,
SilenceErrors: true,
SilenceUsage: true,
Run: func(cmd *cobra.Command, args []string) {
if err := viper.ReadInConfig(); err != nil {
log.WithFields(log.Fields{
"error": err,
"config": viper.ConfigFileUsed(),
}).Fatal("syntax errors in configuration file")
} else {
log.Info("OK!")
}
},
}
configGenCmd := &cobra.Command{
Use: "generate",
Long: `Generate a skeleton configuration from default values`,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) (err error) {
var buf []byte
config := viper.AllSettings()
delete(config, "debug")
delete(config, "config")
delete(config, "seccomp")
if buf, err = yaml.Marshal(&config); err != nil {
return err
}
fmt.Fprintf(os.Stdout, "%s", buf)
return nil
},
}
versionCmd := &cobra.Command{
Use: "version",
Long: `Print program version`,
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) (err error) {
fmt.Fprintf(os.Stdout, "%s\n", Version.String())
return nil
},
}
installCmd := &cobra.Command{
Use: "install",
Long: "Install YubiHSM Connector service",
RunE: func(cmd *cobra.Command, args []string) error {
return s.Install()
},
}
uninstallCmd := &cobra.Command{
Use: "uninstall",
Long: "Uninstall YubiHSM Connector service",
RunE: func(cmd *cobra.Command, args []string) error {
return s.Uninstall()
},
}
startCmd := &cobra.Command{
Use: "start",
Long: "Starts YubiHSM Connector service",
RunE: func(cmd *cobra.Command, args []string) error {
return s.Start()
},
}
stopCmd := &cobra.Command{
Use: "stop",
Long: "Stops YubiHSM Connector service",
RunE: func(cmd *cobra.Command, args []string) error {
return s.Stop()
},
}
restartCmd := &cobra.Command{
Use: "restart",
Long: "Restarts YubiHSM Connector service",
RunE: func(cmd *cobra.Command, args []string) error {
return s.Restart()
},
}
configCmd.AddCommand(configCheckCmd, configGenCmd)
rootCmd.AddCommand(configCmd)
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(installCmd)
rootCmd.AddCommand(uninstallCmd)
rootCmd.AddCommand(startCmd)
rootCmd.AddCommand(stopCmd)
rootCmd.AddCommand(restartCmd)
viper.SetConfigName("yubihsm-connector")
viper.SetEnvPrefix("YUBIHSM_CONNECTOR")
viper.AutomaticEnv()
if err := rootCmd.Execute(); err != nil {
log.Fatal(err)
}
}
// XXX(thorduri): Barf.
var errInvalidSerial = fmt.Errorf("invalid device serial")
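// ensureSerial left-pads a device serial with zeros to a fixed width of 10
// digits and validates that the result is purely numeric. Illustrative
// examples (not from the original source): "123456" becomes "0000123456",
// while "12AB" or any string longer than 10 characters yields errInvalidSerial.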
func ensureSerial(s string) (string, error) {
if s == "" {
return "", nil
} else if len(s) > 10 {
return "", errInvalidSerial
}
n := 10 - len(s)
s = fmt.Sprintf("%s%s", strings.Repeat("0", n), s)
matched, err := regexp.MatchString("^[0-9]{10}$", s)
if err != nil {
return "", err
} else if !matched {
return "", errInvalidSerial
}
return s, nil
}
func timeoutToMs(t uint32) time.Duration {
return time.Duration(t) * time.Millisecond
}
|
[
"\"ProgramData\""
] |
[] |
[
"ProgramData"
] |
[]
|
["ProgramData"]
|
go
| 1 | 0 | |
kafka_schemas_acc_test.go
|
package aiven
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"math/rand"
"os"
"strconv"
"time"
)
var _ = Describe("Kafka ", func() {
var (
projectName string
project *Project
err error
)
Context("Kafka Schemas CRUD", func() {
It("should not error", func() {
projectName = os.Getenv("AIVEN_PROJECT_NAME")
project, err = client.Projects.Get(projectName)
Expect(err).NotTo(HaveOccurred())
})
It("should populate fields properly", func() {
Expect(project).NotTo(BeNil())
if project != nil {
Expect(project.Name).NotTo(BeEmpty())
Expect(project.AccountId).To(BeEmpty())
}
})
// kafka service
var (
serviceName string
service *Service
errS error
)
It("creating service", func() {
serviceName = "test-acc-kafka-sc-" + strconv.Itoa(rand.Int())
service, errS = client.Services.Create(projectName, CreateServiceRequest{
Cloud: "google-europe-west1",
Plan: "business-4",
ProjectVPCID: nil,
ServiceName: serviceName,
ServiceType: "kafka",
UserConfig: map[string]interface{}{
"schema_registry": true,
},
})
})
It("should not error", func() {
Expect(errS).NotTo(HaveOccurred())
})
It("should populate fields properly", func() {
Expect(service).NotTo(BeNil())
if service != nil {
Expect(service.Name).NotTo(BeEmpty())
Expect(service.Plan).NotTo(BeEmpty())
Expect(service.Type).Should(Equal("kafka"))
Eventually(func() string {
service, _ = client.Services.Get(projectName, serviceName)
return service.State
}, 25*time.Minute, 1*time.Minute).Should(Equal("RUNNING"))
}
})
// kafka schema
var (
errR error
errC error
subjectName string
)
It("create kafka schema subject", func() {
time.Sleep(25 * time.Second)
_, errC = client.KafkaGlobalSchemaConfig.Update(projectName, serviceName, KafkaSchemaConfig{
CompatibilityLevel: "BACKWARD",
})
time.Sleep(25 * time.Second)
subjectName = "test-subj"
_, errR = client.KafkaSubjectSchemas.Add(projectName, serviceName, subjectName, KafkaSchemaSubject{
Schema: `{
"doc": "example",
"fields": [{
"default": 5,
"doc": "my test number",
"name": "test",
"namespace": "test",
"type": "int"
}],
"name": "example",
"namespace": "example",
"type": "record"
}`})
})
It("should not error global config", func() {
Expect(errC).NotTo(HaveOccurred())
})
It("should not error subject", func() {
Expect(errR).NotTo(HaveOccurred())
})
It("should populate fields properly", func() {
s, errG := client.KafkaSubjectSchemas.Get(projectName, serviceName, subjectName, 1)
Expect(errG).NotTo(HaveOccurred())
Expect(s).NotTo(BeNil())
if s != nil {
Expect(s.Version.Schema).NotTo(BeEmpty())
Expect(s.Version.Subject).NotTo(BeEmpty())
Expect(s.Version.Version).To(Equal(1))
}
})
It("should update configuration", func() {
_, err := client.KafkaSubjectSchemas.GetConfiguration(projectName, serviceName, subjectName)
Expect(err).To(HaveOccurred())
Expect(IsNotFound(err)).To(Equal(true))
s, errU := client.KafkaSubjectSchemas.UpdateConfiguration(projectName, serviceName, subjectName, "FORWARD")
Expect(errU).NotTo(HaveOccurred())
Expect(s).NotTo(BeNil())
if s != nil {
Expect(s.CompatibilityLevel).Should(Equal("FORWARD"))
}
s2, errG := client.KafkaSubjectSchemas.GetConfiguration(projectName, serviceName, subjectName)
Expect(errG).NotTo(HaveOccurred())
Expect(s2).NotTo(BeNil())
if s2 != nil {
Expect(s2.CompatibilityLevel).Should(Equal("FORWARD"))
}
})
It("delete Kafka Schema subject and Kafka service", func() {
if errD := client.KafkaSubjectSchemas.Delete(projectName, serviceName, subjectName); errD != nil {
Fail("cannot delete kafka schema subject:" + errD.Error())
}
if errD := client.Services.Delete(projectName, serviceName); errD != nil {
Fail("cannot delete service:" + errD.Error())
}
})
})
})
|
[
"\"AIVEN_PROJECT_NAME\""
] |
[] |
[
"AIVEN_PROJECT_NAME"
] |
[]
|
["AIVEN_PROJECT_NAME"]
|
go
| 1 | 0 | |
lab2_sln/training/run_experiment.py
|
#!/usr/bin/env python
import argparse
import json
import importlib
from typing import Dict
import os
from training.util import train_model
DEFAULT_TRAIN_ARGS = {
'batch_size': 256,
'epochs': 8
}
def run_experiment(experiment_config: Dict, save_weights: bool, gpu_ind: int, use_wandb: bool=True):
"""
experiment_config is of the form
{
"dataset": "EmnistLinesDataset",
"dataset_args": {
"max_overlap": 0.4
},
"model": "LineModel",
"network": "line_cnn_sliding_window",
"network_args": {
"window_width": 14,
"window_stride": 7
},
"train_args": {
"batch_size": 128,
"epochs": 10
}
}
save_weights: if True, will save the final model weights to a canonical location (see Model in models/base.py)
gpu_ind: integer specifying which gpu to use
"""
print(f'Running experiment with config {experiment_config} on GPU {gpu_ind}')
datasets_module = importlib.import_module('text_recognizer.datasets')
dataset_class_ = getattr(datasets_module, experiment_config['dataset'])
dataset_args = experiment_config.get('dataset_args', {})
dataset = dataset_class_(**dataset_args)
dataset.load_or_generate_data()
print(dataset)
models_module = importlib.import_module('text_recognizer.models')
model_class_ = getattr(models_module, experiment_config['model'])
networks_module = importlib.import_module('text_recognizer.networks')
network_fn_ = getattr(networks_module, experiment_config['network'])
network_args = experiment_config.get('network_args', {})
model = model_class_(dataset_cls=dataset_class_, network_fn=network_fn_, dataset_args=dataset_args, network_args=network_args)
print(model)
experiment_config['train_args'] = {**DEFAULT_TRAIN_ARGS, **experiment_config.get('train_args', {})}
experiment_config['experiment_group'] = experiment_config.get('experiment_group', None)
experiment_config['gpu_ind'] = gpu_ind
train_model(
model,
dataset,
epochs=experiment_config['train_args']['epochs'],
batch_size=experiment_config['train_args']['batch_size'],
gpu_ind=gpu_ind,
use_wandb=use_wandb
)
score = model.evaluate(dataset.x_test, dataset.y_test)
print(f'Test evaluation: {score}')
if save_weights:
model.save_weights()
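# A minimal sketch of calling run_experiment directly (illustrative only; the
# dataset/model/network names are taken from the docstring above and may not
# all be present in a given checkout):
#
#     run_experiment(
#         {
#             "dataset": "EmnistLinesDataset",
#             "model": "LineModel",
#             "network": "line_cnn_sliding_window",
#             "train_args": {"batch_size": 128, "epochs": 10},
#         },
#         save_weights=False,
#         gpu_ind=0,
#         use_wandb=False,
#     )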
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--gpu",
type=int,
default=0,
help="Provide index of GPU to use."
)
parser.add_argument(
"--save",
default=False,
dest='save',
action='store_true',
help="If true, then final weights will be saved to canonical, version-controlled location"
)
parser.add_argument(
"experiment_config",
type=str,
help="JSON of experiment to run (e.g. '{\"dataset\": \"EmnistDataset\", \"model\": \"CharacterModel\", \"network\": \"mlp\"}'"
)
args = parser.parse_args()
experiment_config = json.loads(args.experiment_config)
os.environ["CUDA_VISIBLE_DEVICES"] = f'{args.gpu}'
run_experiment(experiment_config, args.save, args.gpu)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
translator/app/app.py
|
from flask import Flask, render_template, request, url_for, redirect
import os
from modules.speech import rec
from modules import interactive
from modules import translate
from modules.translate import bpencode, detok, tok
from datetime import datetime
gen = None
app = Flask(__name__)
port = int(os.environ.get("PORT", 5000))
# def save_history(src, tgt):
# d = datetime.now()
# open("data/history/{}{}{}.history".format(d.year, d.month, d.day), "a", encoding="utf-8").write(src + " -> " + tgt + "\n")
@app.route('/')
def translate():
return redirect("/translate/ne-en")
@app.route('/translate/ne-en')
def ne_en():
global gen
gen = interactive.Generator("app/ne_en_bpe20000", "app/models/ne-en.pt")
return render_template("translate.html", title="Translate", active="translate", type="ne_en")
@app.route('/translate/ne-en/<string:sent>')
def ne_en_translate(sent):
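    # Pipeline: tokenize the Nepali input, apply BPE segmentation, run the
    # loaded generator to produce an English hypothesis, then detokenize it.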
translated = detok(gen.generate(bpencode(tok(sent, lang="ne"), "ne_en")), lang="en")
# save_history(sent, translated)
return render_template("transtext.html", data=translated)
@app.route('/translate/en-ne')
def en_ne():
global gen
gen = interactive.Generator("app/en_ne_bpe5000", "app/models/en-ne.pt")
return render_template("translate.html", title="Translate", active="translate", type="en_ne")
@app.route('/translate/en-ne/<string:sent>')
def en_ne_translate(sent):
translated = detok(gen.generate(bpencode(tok(sent, lang="en"), "en_ne")), lang="ne")
# save_history(sent, translated)
return render_template("transtext.html", data=translated)
@app.route('/listen')
def listen():
return render_template("listen.html", title="Listen", data=rec())
@app.route('/save/<string:str1>/<string:str2>')
def save(str1, str2):
with open("data/saved", "a", encoding="utf-8") as f:
f.write(str1 + " -> " + str2 + "\n")
return "s"
return "n"
@app.route('/saved')
def saved():
saved = open("data/saved", "r", encoding="utf-8").read().split('\n')
return render_template("listprint.html", data=saved)
@app.route('/history')
def history():
dates = [os.path.splitext(path)[0] for path in os.listdir("data/history")]
return render_template("history.html", data=dates)
@app.route('/history/<string:date>')
def history_date(date):
saved = open("data/history/"+date+".history", "r", encoding="utf-8").read().split("\n")
return render_template("listprint.html", data=saved)
@app.route('/about')
def about():
return render_template("about.html", title="About", active="about")
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port, debug=True)
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
psutil/_psaix.py
|
# Copyright (c) 2009, Giampaolo Rodola'
# Copyright (c) 2017, Arnon Yaari
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""AIX platform implementation."""
import functools
import glob
import os
import re
import subprocess
import sys
from collections import namedtuple
from . import _common
from . import _psposix
from . import _psutil_aix as cext
from . import _psutil_posix as cext_posix
from ._common import conn_to_ntuple
from ._common import get_procfs_path
from ._common import memoize_when_activated
from ._common import NIC_DUPLEX_FULL
from ._common import NIC_DUPLEX_HALF
from ._common import NIC_DUPLEX_UNKNOWN
from ._common import usage_percent
from ._compat import FileNotFoundError
from ._compat import PermissionError
from ._compat import ProcessLookupError
from ._compat import PY3
__extra__all__ = ["PROCFS_PATH"]
# =====================================================================
# --- globals
# =====================================================================
HAS_THREADS = hasattr(cext, "proc_threads")
HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters")
HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters")
PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
AF_LINK = cext_posix.AF_LINK
PROC_STATUSES = {
cext.SIDL: _common.STATUS_IDLE,
cext.SZOMB: _common.STATUS_ZOMBIE,
cext.SACTIVE: _common.STATUS_RUNNING,
cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this?
cext.SSTOP: _common.STATUS_STOPPED,
}
TCP_STATUSES = {
cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.TCPS_CLOSED: _common.CONN_CLOSE,
cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
cext.TCPS_LISTEN: _common.CONN_LISTEN,
cext.TCPS_CLOSING: _common.CONN_CLOSING,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
proc_info_map = dict(
ppid=0,
rss=1,
vms=2,
create_time=3,
nice=4,
num_threads=5,
status=6,
ttynr=7)
# These objects get set on "import psutil" from the __init__.py
# file, see: https://github.com/giampaolo/psutil/issues/1402
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# =====================================================================
# --- named tuples
# =====================================================================
# psutil.Process.memory_info()
pmem = namedtuple('pmem', ['rss', 'vms'])
# psutil.Process.memory_full_info()
pfullmem = pmem
# psutil.Process.cpu_times()
scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
# psutil.virtual_memory()
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
# =====================================================================
# --- memory
# =====================================================================
def virtual_memory():
total, avail, free, pinned, inuse = cext.virtual_mem()
percent = usage_percent((total - avail), total, round_=1)
return svmem(total, avail, percent, inuse, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
total, free, sin, sout = cext.swap_mem()
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sswap(total, used, free, percent, sin, sout)
# =====================================================================
# --- CPU
# =====================================================================
def cpu_times():
"""Return system-wide CPU times as a named tuple"""
ret = cext.per_cpu_times()
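    # Illustrative example (made-up numbers): with per-CPU tuples
    # [(1.0, 2.0, 3.0, 0.5), (1.5, 2.5, 3.5, 0.5)], zip(*ret) groups each
    # column and sum() yields the system-wide totals (2.5, 4.5, 6.5, 1.0).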
return scputimes(*[sum(x) for x in zip(*ret)])
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples"""
ret = cext.per_cpu_times()
return [scputimes(*x) for x in ret]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# mimic os.cpu_count() behavior
return None
def cpu_count_physical():
cmd = "lsdev -Cc processor"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if p.returncode != 0:
raise RuntimeError("%r command error\n%s" % (cmd, stderr))
processors = stdout.strip().splitlines()
return len(processors) or None
def cpu_stats():
"""Return various CPU stats as a named tuple."""
ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats()
return _common.scpustats(
ctx_switches, interrupts, soft_interrupts, syscalls)
# =====================================================================
# --- disks
# =====================================================================
disk_io_counters = cext.disk_io_counters
disk_usage = _psposix.disk_usage
def disk_partitions(all=False):
"""Return system disk partitions."""
# TODO - the filtering logic should be better checked so that
# it tries to reflect 'df' as much as possible
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
# Differently from, say, Linux, we don't have a list of
# common fs types so the best we can do, AFAIK, is to
# filter by filesystem having a total size > 0.
if not disk_usage(mountpoint).total:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
# =====================================================================
# --- network
# =====================================================================
net_if_addrs = cext_posix.net_if_addrs
if HAS_NET_IO_COUNTERS:
net_io_counters = cext.net_io_counters
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
cmap = _common.conn_tmap
if kind not in cmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in cmap])))
families, types = _common.conn_tmap[kind]
rawlist = cext.net_connections(_pid)
ret = []
for item in rawlist:
fd, fam, type_, laddr, raddr, status, pid = item
if fam not in families:
continue
if type_ not in types:
continue
nt = conn_to_ntuple(fd, fam, type_, laddr, raddr, status,
TCP_STATUSES, pid=pid if _pid == -1 else None)
ret.append(nt)
return ret
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
duplex_map = {"Full": NIC_DUPLEX_FULL,
"Half": NIC_DUPLEX_HALF}
names = set([x[0] for x in net_if_addrs()])
ret = {}
for name in names:
isup, mtu = cext.net_if_stats(name)
# try to get speed and duplex
# TODO: rewrite this in C (entstat forks, so use truss -f to follow.
# looks like it is using an undocumented ioctl?)
duplex = ""
speed = 0
p = subprocess.Popen(["/usr/bin/entstat", "-d", name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if p.returncode == 0:
re_result = re.search(
r"Running: (\d+) Mbps.*?(\w+) Duplex", stdout)
if re_result is not None:
speed = int(re_result.group(1))
duplex = re_result.group(2)
duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
# =====================================================================
# --- other system functions
# =====================================================================
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
localhost = (':0.0', ':0')
for item in rawlist:
user, tty, hostname, tstamp, user_process, pid = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname in localhost:
hostname = 'localhost'
nt = _common.suser(user, tty, hostname, tstamp, pid)
retlist.append(nt)
return retlist
# =====================================================================
# --- processes
# =====================================================================
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()]
def pid_exists(pid):
"""Check for the existence of a unix pid."""
return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo"))
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except (FileNotFoundError, ProcessLookupError):
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
except PermissionError:
raise AccessDenied(self.pid, self._name)
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
self._procfs_path = get_procfs_path()
def oneshot_enter(self):
self._proc_basic_info.cache_activate(self)
self._proc_cred.cache_activate(self)
def oneshot_exit(self):
self._proc_basic_info.cache_deactivate(self)
self._proc_cred.cache_deactivate(self)
@wrap_exceptions
@memoize_when_activated
def _proc_basic_info(self):
return cext.proc_basic_info(self.pid, self._procfs_path)
@wrap_exceptions
@memoize_when_activated
def _proc_cred(self):
return cext.proc_cred(self.pid, self._procfs_path)
@wrap_exceptions
def name(self):
if self.pid == 0:
return "swapper"
# note: max 16 characters
return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00")
@wrap_exceptions
def exe(self):
# there is no way to get executable path in AIX other than to guess,
# and guessing is more complex than what's in the wrapping class
cmdline = self.cmdline()
if not cmdline:
return ''
exe = cmdline[0]
if os.path.sep in exe:
# relative or absolute path
if not os.path.isabs(exe):
# if cwd has changed, we're out of luck - this may be wrong!
exe = os.path.abspath(os.path.join(self.cwd(), exe))
if (os.path.isabs(exe) and
os.path.isfile(exe) and
os.access(exe, os.X_OK)):
return exe
# not found, move to search in PATH using basename only
exe = os.path.basename(exe)
        # search for the exe name in PATH
for path in os.environ["PATH"].split(":"):
possible_exe = os.path.abspath(os.path.join(path, exe))
if (os.path.isfile(possible_exe) and
os.access(possible_exe, os.X_OK)):
return possible_exe
return ''
@wrap_exceptions
def cmdline(self):
return cext.proc_args(self.pid)
@wrap_exceptions
def environ(self):
return cext.proc_environ(self.pid)
@wrap_exceptions
def create_time(self):
return self._proc_basic_info()[proc_info_map['create_time']]
@wrap_exceptions
def num_threads(self):
return self._proc_basic_info()[proc_info_map['num_threads']]
if HAS_THREADS:
@wrap_exceptions
def threads(self):
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = _common.pthread(thread_id, utime, stime)
retlist.append(ntuple)
# The underlying C implementation retrieves all OS threads
# and filters them by PID. At this point we can't tell whether
            # an empty list means there were no threads for the process or
            # the process is no longer active, so we force NSP in case the PID
# is no longer there.
if not retlist:
# will raise NSP if process is gone
os.stat('%s/%s' % (self._procfs_path, self.pid))
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
ret = net_connections(kind, _pid=self.pid)
# The underlying C implementation retrieves all OS connections
# and filters them by PID. At this point we can't tell whether
# an empty list means there were no connections for process or
# process is no longer active so we force NSP in case the PID
# is no longer there.
if not ret:
# will raise NSP if process is gone
os.stat('%s/%s' % (self._procfs_path, self.pid))
return ret
@wrap_exceptions
def nice_get(self):
return cext_posix.getpriority(self.pid)
@wrap_exceptions
def nice_set(self, value):
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def ppid(self):
self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
return self._ppid
@wrap_exceptions
def uids(self):
real, effective, saved, _, _, _ = self._proc_cred()
return _common.puids(real, effective, saved)
@wrap_exceptions
def gids(self):
_, _, _, real, effective, saved = self._proc_cred()
return _common.puids(real, effective, saved)
@wrap_exceptions
def cpu_times(self):
cpu_times = cext.proc_cpu_times(self.pid, self._procfs_path)
return _common.pcputimes(*cpu_times)
@wrap_exceptions
def terminal(self):
ttydev = self._proc_basic_info()[proc_info_map['ttynr']]
# convert from 64-bit dev_t to 32-bit dev_t and then map the device
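        # The mask 0x0000FFFF00000000 picks bits 32-47 of the 64-bit dev_t
        # (presumably the major number on AIX) and shifts them down to bits
        # 16-31, keeping the low 16 bits (the minor number) in place, so the
        # result can be compared with st_rdev of the /dev entries below.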
ttydev = (((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF))
        # try to match ttydev against the rdev of /dev/**/* device files
for dev in glob.glob("/dev/**/*"):
if os.stat(dev).st_rdev == ttydev:
return dev
return None
@wrap_exceptions
def cwd(self):
procfs_path = self._procfs_path
try:
result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid))
return result.rstrip('/')
except FileNotFoundError:
os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
return None
@wrap_exceptions
def memory_info(self):
ret = self._proc_basic_info()
rss = ret[proc_info_map['rss']] * 1024
vms = ret[proc_info_map['vms']] * 1024
return pmem(rss, vms)
memory_full_info = memory_info
@wrap_exceptions
def status(self):
code = self._proc_basic_info()[proc_info_map['status']]
# XXX is '?' legit? (we're not supposed to return it anyway)
return PROC_STATUSES.get(code, '?')
def open_files(self):
# TODO rewrite without using procfiles (stat /proc/pid/fd/* and then
# find matching name of the inode)
p = subprocess.Popen(["/usr/bin/procfiles", "-n", str(self.pid)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if "no such process" in stderr.lower():
raise NoSuchProcess(self.pid, self._name)
procfiles = re.findall(r"(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout)
retlist = []
for fd, path in procfiles:
path = path.strip()
if path.startswith("//"):
path = path[1:]
if path.lower() == "cannot be retrieved":
continue
retlist.append(_common.popenfile(path, int(fd)))
return retlist
@wrap_exceptions
def num_fds(self):
if self.pid == 0: # no /proc/0/fd
return 0
return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
@wrap_exceptions
def num_ctx_switches(self):
return _common.pctxsw(
*cext.proc_num_ctx_switches(self.pid))
@wrap_exceptions
def wait(self, timeout=None):
return _psposix.wait_pid(self.pid, timeout, self._name)
if HAS_PROC_IO_COUNTERS:
@wrap_exceptions
def io_counters(self):
try:
rc, wc, rb, wb = cext.proc_io_counters(self.pid)
except OSError:
# if process is terminated, proc_io_counters returns OSError
# instead of NSP
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
raise
return _common.pio(rc, wc, rb, wb)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
phpweb/phpweb.go
|
package phpweb
import (
"os"
"path/filepath"
"strings"
"github.com/paketo-buildpacks/php-web/config"
"github.com/cloudfoundry/libcfbuildpack/buildpack"
)
const (
// Dependency in the buildplan indicates that this is a web app
Dependency = "php-web"
)
// Version returns the selected version of PHP using the following precedence:
//
// 1. `php.version` from `buildpack.yml`
// 2. Build Plan Version, if set by composer
// 3. Buildpack Metadata "default_version"
// 4. `*` which should pick latest version
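// Taken on its own this helper only covers steps 3 and 4: for example
// (illustrative values), a Metadata entry default_version = "7.3.*" makes it
// return "7.3.*", and without that entry it falls back to "*". Steps 1 and 2
// are presumably resolved by callers before this default is consulted.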
func Version(buildpack buildpack.Buildpack) string {
if version, ok := buildpack.Metadata["default_version"].(string); ok {
return version
}
return "*"
}
// LoadAvailablePHPExtensions locates available extensions and returns the list
func LoadAvailablePHPExtensions() ([]string, error) {
extensions, err := filepath.Glob(filepath.Join(os.Getenv("PHP_EXTENSION_DIR"), "*"))
if err != nil {
return []string{}, err
}
for i := 0; i < len(extensions); i++ {
		extensions[i] = strings.TrimSuffix(filepath.Base(extensions[i]), ".so")
}
return extensions, nil
}
// PickWebDir will select the correct web directory to use
func PickWebDir(buildpackYAML config.BuildpackYAML) string {
if buildpackYAML.Config.WebDirectory != "" {
return buildpackYAML.Config.WebDirectory
}
return "htdocs"
}
// SearchForWebApp looks to see if this application is a PHP web app
func SearchForWebApp(appRoot string, webdir string) (bool, error) {
matchList, err := filepath.Glob(filepath.Join(appRoot, webdir, "*.php"))
if err != nil {
return false, err
}
if len(matchList) > 0 {
return true, nil
}
return false, nil
}
// Metadata is used solely for providing `Identity()`
type Metadata struct {
Name string
Hash string
}
// Identity provides libcfbuildpack with information to decide if it should contribute
func (m Metadata) Identity() (name string, version string) {
return m.Name, m.Hash
}
|
[
"\"PHP_EXTENSION_DIR\""
] |
[] |
[
"PHP_EXTENSION_DIR"
] |
[]
|
["PHP_EXTENSION_DIR"]
|
go
| 1 | 0 | |
experiments/chat/turbotutorial/wsgi.py
|
"""
WSGI config for turbotutorial project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'turbotutorial.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/integration/plugin_test.go
|
package integration
import (
"os"
"reflect"
"strings"
"testing"
"github.com/kubernetes-incubator/kube-aws/core/root"
"github.com/kubernetes-incubator/kube-aws/core/root/config"
"github.com/kubernetes-incubator/kube-aws/model"
"github.com/kubernetes-incubator/kube-aws/plugin"
"github.com/kubernetes-incubator/kube-aws/test/helper"
)
func TestPlugin(t *testing.T) {
kubeAwsSettings := newKubeAwsSettingsFromEnv(t)
s3URI, s3URIExists := os.LookupEnv("KUBE_AWS_S3_DIR_URI")
if !s3URIExists || s3URI == "" {
s3URI = "s3://examplebucket/exampledir"
t.Logf(`Falling back s3URI to a stub value "%s" for tests of validating stack templates. No assets will actually be uploaded to S3`, s3URI)
}
minimalValidConfigYaml := kubeAwsSettings.minimumValidClusterYamlWithAZ("c")
validCases := []struct {
context string
clusterYaml string
plugins []helper.TestPlugin
assertConfig []ConfigTester
assertCluster []ClusterTester
}{
{
context: "WithAddons",
clusterYaml: minimalValidConfigYaml + `
kubeAwsPlugins:
myPlugin:
enabled: true
queue:
name: baz1
oidc:
issuer:
url: "https://login.example.com/"
worker:
nodePools:
- name: pool1
kubeAwsPlugins:
myPlugin:
enabled: true
queue:
name: baz2
`,
plugins: []helper.TestPlugin{
helper.TestPlugin{
Name: "my-plugin",
Files: map[string]string{
"assets/controller/baz.txt": "controller-baz",
"assets/etcd/baz.txt": "etcd-baz",
"assets/worker/baz.txt": "worker-baz",
},
Yaml: `
metadata:
name: my-plugin
version: 0.0.1
spec:
configuration:
# This is the defaults for the values passed to templates like:
# * cloudformation.stacks.{controlPlane,nodePool,root}.resources.append and
# * kubernetes.apiserver.flags[].value
#
# The defaults can be overridden from cluster.yaml via:
# * kubeAwsPlugins.pluginName.* and
# * worker.nodePools[].kubeAwsPlugins.pluginName.*
values:
queue:
name: bar
oidc:
issuer:
url: unspecified
cloudformation:
stacks:
controlPlane:
resources:
append:
inline: |
{
"QueueFromMyPlugin": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {{quote .Values.queue.name}}
}
}
}
nodePool:
resources:
append:
inline: |
{
"QueueFromMyPlugin": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {{quote .Values.queue.name}}
}
}
}
root:
resources:
append:
inline: |
{
"QueueFromMyPlugin": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {{quote .Values.queue.name}}
}
}
}
kubernetes:
apiserver:
flags:
- name: "oidc-issuer-url"
value: "{{ .Values.oidc.issuer.url}}"
volumes:
- name: "mycreds"
path: "/etc/my/creds"
node:
roles:
controller:
iam:
policy:
statements:
- actions:
- "ec2:Describe*"
effect: "Allow"
resources:
- "*"
kubelet:
nodeLabels:
role: controller
systemd:
units:
- name: save-queue-name.service
contents:
inline: |
[Unit]
storage:
files:
- path: /var/kube-aws/bar.txt
permissions: 0644
contents:
inline: controller-bar
- path: /var/kube-aws/baz.txt
permissions: 0644
contents:
source:
path: assets/controller/baz.txt
etcd:
iam:
policy:
statements:
- actions:
- "ec2:Describe*"
effect: "Allow"
resources:
- "*"
systemd:
units:
- name: save-queue-name.service
contents:
inline: |
[Unit]
storage:
files:
- path: /var/kube-aws/bar.txt
permissions: 0644
contents:
inline: etcd-bar
- path: /var/kube-aws/baz.txt
permissions: 0644
contents:
source:
path: assets/etcd/baz.txt
worker:
iam:
policy:
statements:
- actions:
- "ec2:*"
effect: "Allow"
resources:
- "*"
kubelet:
nodeLabels:
role: worker
featureGates:
Accelerators: "true"
systemd:
units:
- name: save-queue-name.service
contents:
inline: |
[Unit]
storage:
files:
- path: /var/kube-aws/bar.txt
permissions: 0644
contents:
inline: worker-bar
- path: /var/kube-aws/baz.txt
permissions: 0644
contents:
source:
path: assets/worker/baz.txt
`,
},
},
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
cp := c.PluginConfigs["myPlugin"]
if !cp.Enabled {
t.Errorf("The plugin should have been enabled: %+v", cp)
}
if q, ok := cp.Values["queue"].(map[string]interface{}); ok {
if m, ok := q["name"].(string); ok {
if m != "baz1" {
t.Errorf("The plugin should have queue.name set to \"baz1\", but was set to \"%s\"", m)
}
}
}
np := c.NodePools[0].Plugins["myPlugin"]
if !np.Enabled {
t.Errorf("The plugin should have been enabled: %+v", np)
}
if q, ok := np.Values["queue"].(map[string]interface{}); ok {
if m, ok := q["name"].(string); ok {
if m != "baz2" {
t.Errorf("The plugin should have queue.name set to \"baz2\", but was set to \"%s\"", m)
}
}
}
},
},
assertCluster: []ClusterTester{
func(c root.Cluster, t *testing.T) {
cp := c.ControlPlane()
np := c.NodePools()[0]
{
e := model.CustomFile{
Path: "/var/kube-aws/bar.txt",
Permissions: 0644,
Content: "controller-bar",
}
a := cp.StackConfig.Controller.CustomFiles[0]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected controller custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.CustomFile{
Path: "/var/kube-aws/baz.txt",
Permissions: 0644,
Content: "controller-baz",
}
a := cp.StackConfig.Controller.CustomFiles[1]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected controller custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.IAMPolicyStatements{
model.IAMPolicyStatement{
Effect: "Allow",
Actions: []string{"ec2:Describe*"},
Resources: []string{"*"},
},
}
a := cp.StackConfig.Controller.IAMConfig.Policy.Statements
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected controller iam policy statements from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.CustomFile{
Path: "/var/kube-aws/bar.txt",
Permissions: 0644,
Content: "etcd-bar",
}
a := cp.StackConfig.Etcd.CustomFiles[0]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected etcd custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.CustomFile{
Path: "/var/kube-aws/baz.txt",
Permissions: 0644,
Content: "etcd-baz",
}
a := cp.StackConfig.Etcd.CustomFiles[1]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected etcd custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.IAMPolicyStatements{
model.IAMPolicyStatement{
Effect: "Allow",
Actions: []string{"ec2:Describe*"},
Resources: []string{"*"},
},
}
a := cp.StackConfig.Etcd.IAMConfig.Policy.Statements
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected etcd iam policy statements from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.CustomFile{
Path: "/var/kube-aws/bar.txt",
Permissions: 0644,
Content: "worker-bar",
}
a := np.StackConfig.CustomFiles[0]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected worker custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.CustomFile{
Path: "/var/kube-aws/baz.txt",
Permissions: 0644,
Content: "worker-baz",
}
a := np.StackConfig.CustomFiles[1]
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected worker custom file from plugin: expected=%v actual=%v", e, a)
}
}
{
e := model.IAMPolicyStatements{
model.IAMPolicyStatement{
Effect: "Allow",
Actions: []string{"ec2:*"},
Resources: []string{"*"},
},
}
a := np.StackConfig.IAMConfig.Policy.Statements
if !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected worker iam policy statements from plugin: expected=%v actual=%v", e, a)
}
}
// A kube-aws plugin can inject systemd units
controllerUserdataS3Part := cp.UserDataController.Parts[model.USERDATA_S3].Asset.Content
if !strings.Contains(controllerUserdataS3Part, "save-queue-name.service") {
t.Errorf("Invalid controller userdata: %v", controllerUserdataS3Part)
}
etcdUserdataS3Part := cp.UserDataEtcd.Parts[model.USERDATA_S3].Asset.Content
if !strings.Contains(etcdUserdataS3Part, "save-queue-name.service") {
t.Errorf("Invalid etcd userdata: %v", etcdUserdataS3Part)
}
workerUserdataS3Part := np.UserDataWorker.Parts[model.USERDATA_S3].Asset.Content
if !strings.Contains(workerUserdataS3Part, "save-queue-name.service") {
t.Errorf("Invalid worker userdata: %v", workerUserdataS3Part)
}
// A kube-aws plugin can inject custom cfn stack resources
controlPlaneStackTemplate, err := cp.RenderStackTemplateAsString()
if err != nil {
t.Errorf("failed to render control-plane stack template: %v", err)
}
if !strings.Contains(controlPlaneStackTemplate, "QueueFromMyPlugin") {
t.Errorf("Invalid control-plane stack template: missing resource QueueFromMyPlugin: %v", controlPlaneStackTemplate)
}
if !strings.Contains(controlPlaneStackTemplate, `"QueueName":"baz1"`) {
t.Errorf("Invalid control-plane stack template: missing QueueName baz1: %v", controlPlaneStackTemplate)
}
if !strings.Contains(controlPlaneStackTemplate, `"Action":["ec2:Describe*"]`) {
t.Errorf("Invalid control-plane stack template: missing iam policy statement ec2:Describe*: %v", controlPlaneStackTemplate)
}
rootStackTemplate, err := c.RenderStackTemplateAsString()
if err != nil {
t.Errorf("failed to render root stack template: %v", err)
}
if !strings.Contains(rootStackTemplate, "QueueFromMyPlugin") {
t.Errorf("Invalid root stack template: missing resource QueueFromMyPlugin: %v", rootStackTemplate)
}
if !strings.Contains(rootStackTemplate, `"QueueName":"baz1"`) {
t.Errorf("Invalid root stack template: missing QueueName baz1: %v", rootStackTemplate)
}
nodePoolStackTemplate, err := np.RenderStackTemplateAsString()
if err != nil {
t.Errorf("failed to render worker node pool stack template: %v", err)
}
if !strings.Contains(nodePoolStackTemplate, "QueueFromMyPlugin") {
t.Errorf("Invalid worker node pool stack template: missing resource QueueFromMyPlugin: %v", nodePoolStackTemplate)
}
if !strings.Contains(nodePoolStackTemplate, `"QueueName":"baz2"`) {
t.Errorf("Invalid worker node pool stack template: missing QueueName baz2: %v", nodePoolStackTemplate)
}
if !strings.Contains(nodePoolStackTemplate, `"Action":["ec2:*"]`) {
t.Errorf("Invalid worker node pool stack template: missing iam policy statement ec2:*: %v", nodePoolStackTemplate)
}
// A kube-aws plugin can inject node labels
if !strings.Contains(controllerUserdataS3Part, "role=controller") {
t.Error("missing controller node label: role=controller")
}
if !strings.Contains(workerUserdataS3Part, "role=worker") {
t.Error("missing worker node label: role=worker")
}
// A kube-aws plugin can activate feature gates
if !strings.Contains(workerUserdataS3Part, `--feature-gates="Accelerators=true"`) {
t.Error("missing worker feature gate: Accelerators=true")
}
// A kube-aws plugin can add volume mounts to apiserver pod
if !strings.Contains(controllerUserdataS3Part, `mountPath: "/etc/my/creds"`) {
t.Errorf("missing apiserver volume mount: /etc/my/creds")
}
// A kube-aws plugin can add volumes to apiserver pod
if !strings.Contains(controllerUserdataS3Part, `path: "/etc/my/creds"`) {
t.Errorf("missing apiserver volume: /etc/my/creds")
}
// A kube-aws plugin can add flags to apiserver
if !strings.Contains(controllerUserdataS3Part, `--oidc-issuer-url=https://login.example.com/`) {
t.Errorf("missing apiserver flag: --oidc-issuer-url=https://login.example.com/")
}
},
},
},
}
for _, validCase := range validCases {
t.Run(validCase.context, func(t *testing.T) {
helper.WithPlugins(validCase.plugins, func() {
plugins, err := plugin.LoadAll()
if err != nil {
t.Errorf("failed to load plugins: %v", err)
t.FailNow()
}
if len(plugins) != len(validCase.plugins) {
t.Errorf("failed to load plugins: expected %d plugins but loaded %d plugins", len(validCase.plugins), len(plugins))
t.FailNow()
}
configBytes := validCase.clusterYaml
providedConfig, err := config.ConfigFromBytesWithEncryptService([]byte(configBytes), plugins, helper.DummyEncryptService{})
if err != nil {
t.Errorf("failed to parse config %s: %v", configBytes, err)
t.FailNow()
}
t.Run("AssertConfig", func(t *testing.T) {
for _, assertion := range validCase.assertConfig {
assertion(providedConfig, t)
}
})
helper.WithDummyCredentials(func(dummyAssetsDir string) {
var stackTemplateOptions = root.NewOptions(s3URI, false, false)
stackTemplateOptions.AssetsDir = dummyAssetsDir
stackTemplateOptions.ControllerTmplFile = "../../core/controlplane/config/templates/cloud-config-controller"
stackTemplateOptions.WorkerTmplFile = "../../core/controlplane/config/templates/cloud-config-worker"
stackTemplateOptions.EtcdTmplFile = "../../core/controlplane/config/templates/cloud-config-etcd"
stackTemplateOptions.RootStackTemplateTmplFile = "../../core/root/config/templates/stack-template.json"
stackTemplateOptions.NodePoolStackTemplateTmplFile = "../../core/nodepool/config/templates/stack-template.json"
stackTemplateOptions.ControlPlaneStackTemplateTmplFile = "../../core/controlplane/config/templates/stack-template.json"
cluster, err := root.ClusterFromConfig(providedConfig, stackTemplateOptions, false)
if err != nil {
t.Errorf("failed to create cluster driver : %v", err)
t.FailNow()
}
t.Run("AssertCluster", func(t *testing.T) {
for _, assertion := range validCase.assertCluster {
assertion(cluster, t)
}
})
t.Run("ValidateTemplates", func(t *testing.T) {
if err := cluster.ValidateTemplates(); err != nil {
t.Errorf("failed to render stack template: %v", err)
}
})
if os.Getenv("KUBE_AWS_INTEGRATION_TEST") == "" {
t.Skipf("`export KUBE_AWS_INTEGRATION_TEST=1` is required to run integration tests. Skipping.")
t.SkipNow()
} else {
t.Run("ValidateStack", func(t *testing.T) {
if !s3URIExists {
t.Errorf("failed to obtain value for KUBE_AWS_S3_DIR_URI")
t.FailNow()
}
report, err := cluster.ValidateStack()
if err != nil {
t.Errorf("failed to validate stack: %s %v", report, err)
}
})
}
})
})
})
}
}
|
[
"\"KUBE_AWS_INTEGRATION_TEST\""
] |
[] |
[
"KUBE_AWS_INTEGRATION_TEST"
] |
[]
|
["KUBE_AWS_INTEGRATION_TEST"]
|
go
| 1 | 0 | |
tests/test_pytest_cov.py
|
import collections
import glob
import os
import platform
import re
import subprocess
import sys
from itertools import chain
import coverage
import py
import pytest
import virtualenv
import xdist
from fields import Namespace
from process_tests import TestProcess as _TestProcess
from process_tests import dump_on_error
from process_tests import wait_for_strings
from six import exec_
import pytest_cov.plugin
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
coverage, platform # required for skipif mark on test_cov_min_from_coveragerc
max_worker_restart_0 = "--max-worker-restart=0"
SCRIPT = '''
import sys, helper
def pytest_generate_tests(metafunc):
for i in [10]:
metafunc.parametrize('p', range(i))
def test_foo(p):
x = True
helper.do_stuff() # get some coverage in some other completely different location
if sys.version_info[0] > 5:
assert False
'''
SCRIPT2 = '''
#
def test_bar():
x = True
assert x
'''
COVERAGERC_SOURCE = '''\
[run]
source = .
'''
SCRIPT_CHILD = '''
import sys
idx = int(sys.argv[1])
if idx == 0:
foo = "a" # previously there was a "pass" here but Python 3.5 optimizes it away.
if idx == 1:
foo = "b" # previously there was a "pass" here but Python 3.5 optimizes it away.
'''
SCRIPT_PARENT = '''
import os
import subprocess
import sys
def pytest_generate_tests(metafunc):
for i in [2]:
metafunc.parametrize('idx', range(i))
def test_foo(idx):
out, err = subprocess.Popen(
[sys.executable, os.path.join(os.path.dirname(__file__), 'child_script.py'), str(idx)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in [2]:
metafunc.parametrize('idx', range(i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
os.path.join(os.path.dirname(__file__), 'child_script.py'),
str(idx)
])
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in [2]:
if metafunc.function is test_foo: metafunc.parametrize('idx', range(i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
'-c', 'import sys; sys.argv = ["", str(%s)]; import child_script' % idx
])
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_FUNCARG = '''
import coverage
def test_foo(cov):
assert isinstance(cov, coverage.Coverage)
'''
SCRIPT_FUNCARG_NOT_ACTIVE = '''
def test_foo(cov):
assert cov is None
'''
CHILD_SCRIPT_RESULT = '[56] * 100%'
PARENT_SCRIPT_RESULT = '9 * 100%'
DEST_DIR = 'cov_dest'
REPORT_NAME = 'cov.xml'
xdist_params = pytest.mark.parametrize('opts', [
'',
pytest.param('-n 1', marks=pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"'))
], ids=['nodist', 'xdist'])
@pytest.fixture(scope='session', autouse=True)
def adjust_sys_path():
"""Adjust PYTHONPATH during tests to make "helper" importable in SCRIPT."""
orig_path = os.environ.get('PYTHONPATH', None)
new_path = os.path.dirname(__file__)
if orig_path is not None:
new_path = os.pathsep.join([new_path, orig_path])
os.environ['PYTHONPATH'] = new_path
yield
if orig_path is None:
del os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = orig_path
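# Each `prop` parameter tuple below is (coveragerc [run] option, extra CLI
# flag, expected term-missing line for SCRIPT, expected line for SCRIPT2); the
# ids mark branch coverage enabled via both config and flag (branch2x), config
# only (branch1c), flag only (branch1a), or not at all (nobranch).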
@pytest.fixture(params=[
('branch=true', '--cov-branch', '9 * 85%', '3 * 100%'),
('branch=true', '', '9 * 85%', '3 * 100%'),
('', '--cov-branch', '9 * 85%', '3 * 100%'),
('', '', '9 * 89%', '3 * 100%'),
], ids=['branch2x', 'branch1c', 'branch1a', 'nobranch'])
def prop(request):
return Namespace(
code=SCRIPT,
code2=SCRIPT2,
conf=request.param[0],
fullconf='[run]\n%s\n' % request.param[0],
prefixedfullconf='[coverage:run]\n%s\n' % request.param[0],
args=request.param[1].split(),
result=request.param[2],
result2=request.param[3],
)
def test_central(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script,
*prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_annotate(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written next to source',
'*10 passed*',
])
assert result.ret == 0
def test_annotate_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join(script.basename + ",cover").check()
assert result.ret == 0
def test_html(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir htmlcov',
'*10 passed*',
])
dest_dir = testdir.tmpdir.join('htmlcov')
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_html_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_term_report_does_not_interact_with_html_output(testdir):
script = testdir.makepyfile(test_funcarg=SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing:skip-covered',
'--cov-report=html:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir ' + DEST_DIR,
'*1 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert sorted(dest_dir.visit("**/*.html")) == [dest_dir.join("index.html"), dest_dir.join("test_funcarg_py.html")]
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_html_configured_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[html]
directory = somewhere
""")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir somewhere',
'*10 passed*',
])
dest_dir = testdir.tmpdir.join('somewhere')
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_xml_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=xml:' + REPORT_NAME,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage XML written to file ' + REPORT_NAME,
'*10 passed*',
])
assert testdir.tmpdir.join(REPORT_NAME).check()
assert result.ret == 0
def test_term_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: "term:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_term_missing_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: '
'"term-missing:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_cov_min_100(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=100',
script)
assert result.ret != 0
result.stdout.fnmatch_lines([
'FAIL Required test coverage of 100% not reached. Total coverage: *%'
])
def test_cov_min_50(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
'--cov-report=xml',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_cov_min_float_value(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=88.88',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 88.88% reached. Total coverage: 88.89%'
])
def test_cov_min_float_value_not_reached(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=88.89',
script)
assert result.ret == 1
result.stdout.fnmatch_lines([
'FAIL Required test coverage of 88.89% not reached. Total coverage: 88.89%'
])
def test_cov_min_no_report(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_central_nonspecific(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_nonspecific* %s *' % prop.result,
'*10 passed*'
])
# multi-module coverage report
assert any(line.startswith('TOTAL ') for line in result.stdout.lines)
assert result.ret == 0
def test_cov_min_from_coveragerc(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[report]
fail_under = 100
""")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret != 0
def test_central_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(COVERAGERC_SOURCE + prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_coveragerc* %s *' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-3:])
assert result.ret == 0
@xdist_params
def test_central_with_path_aliasing(testdir, monkeypatch, opts, prop):
mod1 = testdir.mkdir('src').join('mod.py')
mod1.write(SCRIPT)
mod2 = testdir.mkdir('aliased').join('mod.py')
mod2.write(SCRIPT)
script = testdir.makepyfile('''
from mod import *
''')
testdir.tmpdir.join('setup.cfg').write("""
[coverage:paths]
source =
src
aliased
[coverage:run]
source = mod
parallel = true
%s
""" % prop.conf)
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([os.environ.get('PYTHONPATH', ''), 'aliased']))
result = testdir.runpytest('-v', '-s',
'--cov',
'--cov-report=term-missing',
script, *opts.split()+prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]mod* %s *' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-3:])
assert result.ret == 0
@xdist_params
def test_borken_cwd(testdir, monkeypatch, opts):
testdir.makepyfile(mod='''
def foobar(a, b):
return a + b
''')
script = testdir.makepyfile('''
import os
import tempfile
import pytest
import mod
@pytest.fixture
def bad():
path = tempfile.mkdtemp('test_borken_cwd')
os.chdir(path)
yield
try:
os.rmdir(path)
except OSError:
pass
def test_foobar(bad):
assert mod.foobar(1, 2) == 3
''')
result = testdir.runpytest('-v', '-s',
'--cov=mod',
'--cov-branch',
script, *opts.split())
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*mod* 100%',
'*1 passed*',
])
assert result.ret == 0
def test_subprocess_with_path_aliasing(testdir, monkeypatch):
src = testdir.mkdir('src')
src.join('parent_script.py').write(SCRIPT_PARENT)
src.join('child_script.py').write(SCRIPT_CHILD)
aliased = testdir.mkdir('aliased')
parent_script = aliased.join('parent_script.py')
parent_script.write(SCRIPT_PARENT)
aliased.join('child_script.py').write(SCRIPT_CHILD)
testdir.tmpdir.join('.coveragerc').write("""
[paths]
source =
src
aliased
[run]
source =
parent_script
child_script
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([
os.environ.get('PYTHONPATH', ''), 'aliased']))
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]child_script* %s*' % CHILD_SCRIPT_RESULT,
'src[\\/]parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_show_missing_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write("""
[run]
source = .
%s
[report]
show_missing = true
""" % prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Name * Stmts * Miss * Cover * Missing',
'test_show_missing_coveragerc* %s * 11*' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-3:])
assert result.ret == 0
def test_no_cov_on_fail(testdir):
script = testdir.makepyfile('''
def test_fail():
assert False
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov-on-fail',
script)
assert 'coverage: platform' not in result.stdout.str()
result.stdout.fnmatch_lines(['*1 failed*'])
def test_no_cov(testdir, monkeypatch):
script = testdir.makepyfile(SCRIPT)
testdir.makeini("""
[pytest]
addopts=--no-cov
""")
result = testdir.runpytest('-vvv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-rw',
script)
result.stdout.fnmatch_lines_random([
'WARNING: Coverage disabled via --no-cov switch!',
'*Coverage disabled via --no-cov switch!',
])
def test_cov_and_failure_report_on_fail(testdir):
script = testdir.makepyfile(SCRIPT + '''
def test_fail(p):
assert False
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-fail-under=100',
'--cov-report=html',
script)
result.stdout.fnmatch_lines_random([
'*10 failed*',
'*coverage: platform*',
'*FAIL Required test coverage of 100% not reached*',
'*assert False*',
])
@pytest.mark.skipif('sys.platform == "win32" or platform.python_implementation() == "PyPy"')
def test_dist_combine_racecondition(testdir):
script = testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("foo", range(1000))
def test_foo(foo):
""" + "\n".join("""
if foo == %s:
assert True
""" % i for i in range(1000)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '5', '-s',
script)
result.stdout.fnmatch_lines([
'test_dist_combine_racecondition* 0 * 100%*',
'*1000 passed*'
])
for line in chain(result.stdout.lines, result.stderr.lines):
assert 'The following workers failed to return coverage data' not in line
assert 'INTERNALERROR' not in line
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_not_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[run]
%s
[paths]
source =
.
dir1
dir2''' % prop.conf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
'--rsyncdir=.coveragerc',
max_worker_restart_0, '-s',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_not_collocated_coveragerc_source(testdir, prop):
script = testdir.makepyfile(prop.code)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[run]
%s
source = %s
[paths]
source =
.
dir1
dir2''' % (prop.conf, script.dirpath()))
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
'--rsyncdir=.coveragerc',
max_worker_restart_0, '-s',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_central_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_change_cwd(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT_CHANGE_CWD,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
testdir.makefile('', coveragerc="""
[run]
branch = true
parallel = true
""")
result = testdir.runpytest('-v', '-s',
'--cov=%s' % scripts.dirpath(),
'--cov-config=coveragerc',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
'*parent_script* 100%*',
])
assert result.ret == 0
def test_central_subprocess_change_cwd_with_pythonpath(testdir, monkeypatch):
stuff = testdir.mkdir('stuff')
parent_script = stuff.join('parent_script.py')
parent_script.write(SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD)
stuff.join('child_script.py').write(SCRIPT_CHILD)
testdir.makefile('', coveragerc="""
[run]
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', str(stuff))
result = testdir.runpytest('-vv', '-s',
'--cov=child_script',
'--cov-config=coveragerc',
'--cov-report=term-missing',
'--cov-branch',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_no_subscript(testdir):
script = testdir.makepyfile("""
import subprocess, sys
def test_foo():
subprocess.check_call([sys.executable, '-c', 'print("Hello World")'])
""")
testdir.makefile('', coveragerc="""
[run]
parallel = true
""")
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-branch',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_subprocess_no_subscript* * 3 * 0 * 100%*',
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_subprocess_collocated(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_subprocess_not_collocated(testdir, tmpdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
child_script = scripts.dirpath().join('child_script.py')
dir1 = tmpdir.mkdir('dir1')
dir2 = tmpdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[paths]
source =
%s
*/dir1
*/dir2
''' % scripts.dirpath())
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % child_script,
'--rsyncdir=%s' % parent_script,
'--rsyncdir=.coveragerc',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_invalid_coverage_source(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.makeini("""
[pytest]
console_output_style=classic
""")
result = testdir.runpytest('-v',
'--cov=non_existent_module',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*10 passed*'
])
result.stderr.fnmatch_lines([
'Coverage.py warning: No data was collected.*'
])
result.stdout.fnmatch_lines([
'*Failed to generate report: No data to report.',
])
assert result.ret == 0
matching_lines = [line for line in result.outlines if '%' in line]
assert not matching_lines
@pytest.mark.skipif("'dev' in pytest.__version__")
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_missing_data(testdir):
"""Test failure when using a worker without pytest-cov installed."""
venv_path = os.path.join(str(testdir.tmpdir), 'venv')
virtualenv.cli_run([venv_path])
if sys.platform == 'win32':
if platform.python_implementation() == "PyPy":
exe = os.path.join(venv_path, 'bin', 'python.exe')
else:
exe = os.path.join(venv_path, 'Scripts', 'python.exe')
else:
exe = os.path.join(venv_path, 'bin', 'python')
subprocess.check_call([
exe,
'-mpip',
'install',
'py==%s' % py.__version__,
'pytest==%s' % pytest.__version__,
'pytest_xdist==%s' % xdist.__version__
])
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//python=%s' % exe,
max_worker_restart_0,
script)
result.stdout.fnmatch_lines([
'The following workers failed to return coverage data, ensure that pytest-cov is installed on these workers.'
])
def test_funcarg(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_funcarg* 3 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_funcarg_not_active(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG_NOT_ACTIVE)
result = testdir.runpytest('-v',
script)
result.stdout.fnmatch_lines([
'*1 passed*'
])
assert result.ret == 0
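# The three multiprocessing Pool tests below build their target_fn by string
# formatting: the joined "if a == N: return a / el" fragments combine with the
# trailing "%sse:" stub to produce one long if/elif/else chain, giving coverage many
# distinct lines to attribute across worker processes.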
@pytest.mark.skipif("sys.version_info[0] < 3", reason="no context manager api on Python 2")
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('platform.python_implementation() == "PyPy"', reason="often deadlocks on PyPy")
@pytest.mark.skipif('sys.version_info[:2] >= (3, 8)', reason="deadlocks on Python 3.8+, see: https://bugs.python.org/issue38227")
def test_multiprocessing_pool(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
for i in range(33):
with multiprocessing.Pool(3) as p:
p.map(target_fn, [i * 3 + j for j in range(3)])
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('platform.python_implementation() == "PyPy"', reason="often deadlocks on PyPy")
@pytest.mark.skipif('sys.version_info[:2] >= (3, 8)', reason="deadlocks on Python 3.8, see: https://bugs.python.org/issue38227")
def test_multiprocessing_pool_terminate(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
for i in range(33):
p = multiprocessing.Pool(3)
try:
p.map(target_fn, [i * 3 + j for j in range(3)])
finally:
p.terminate()
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('sys.version_info[0] > 2 and platform.python_implementation() == "PyPy"', reason="broken on PyPy3")
def test_multiprocessing_pool_close(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
for i in range(33):
p = multiprocessing.Pool(3)
try:
p.map(target_fn, [i * 3 + j for j in range(3)])
finally:
p.close()
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process_no_source(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process_with_terminate(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
import time
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
event = multiprocessing.Event()
def target_fn():
a = True
event.set()
time.sleep(5)
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
time.sleep(0.5)
event.wait(1)
p.terminate()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 16 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGTERM isn't really supported on Windows")
def test_cleanup_on_sigterm(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def cleanup(num, frame):
print("num == signal.SIGTERM => %s" % (num == signal.SIGTERM))
raise Exception()
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b"""num == signal.SIGTERM => True
captured Exception()
"""
assert proc.returncode == 0
if __name__ == "__main__":
signal.signal(signal.SIGTERM, cleanup)
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 26-27',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform != "win32"')
@pytest.mark.parametrize('setup', [
('signal.signal(signal.SIGBREAK, signal.SIG_DFL); cleanup_on_signal(signal.SIGBREAK)', '87% 21-22'),
('cleanup_on_signal(signal.SIGBREAK)', '87% 21-22'),
('cleanup()', '73% 19-22'),
])
def test_cleanup_on_sigterm_sig_break(testdir, setup):
# worth a read: https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen(
[sys.executable, __file__],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, shell=True
)
time.sleep(1)
proc.send_signal(signal.CTRL_BREAK_EVENT)
stdout, stderr = proc.communicate()
assert not stderr
assert stdout in [b"^C", b"", b"captured IOError(4, 'Interrupted function call')\\n"]
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_signal, cleanup
''' + setup[0] + '''
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* %s' % setup[1],
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGTERM isn't really supported on Windows")
@pytest.mark.parametrize('setup', [
('signal.signal(signal.SIGTERM, signal.SIG_DFL); cleanup_on_sigterm()', '88% 18-19'),
('cleanup_on_sigterm()', '88% 18-19'),
('cleanup()', '75% 16-19'),
])
def test_cleanup_on_sigterm_sig_dfl(testdir, setup):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b""
assert proc.returncode in [128 + signal.SIGTERM, -signal.SIGTERM]
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_sigterm, cleanup
''' + setup[0] + '''
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* %s' % setup[1],
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGINT is subtly broken on Windows")
def test_cleanup_on_sigterm_sig_dfl_sigint(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.send_signal(signal.SIGINT)
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b"""captured KeyboardInterrupt()
"""
assert proc.returncode == 0
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_signal
cleanup_on_signal(signal.SIGINT)
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 88% 19-20',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="fork not available on Windows")
def test_cleanup_on_sigterm_sig_ign(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.send_signal(signal.SIGINT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b""
# it appears signal handling is buggy on python 2?
if sys.version_info[0] == 3: assert proc.returncode in [128 + signal.SIGTERM, -signal.SIGTERM]
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_IGN)
from pytest_cov.embed import cleanup_on_signal
cleanup_on_signal(signal.SIGINT)
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 89% 23-24',
'*1 passed*'
])
assert result.ret == 0
MODULE = '''
def func():
return 1
'''
CONFTEST = '''
import mod
mod.func()
'''
BASIC_TEST = '''
def test_basic():
x = True
assert x
'''
CONF_RESULT = 'mod* 2 * 100%*'
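# MODULE/CONFTEST/BASIC_TEST check that code imported and executed by conftest.py is
# still measured: conftest.py calls mod.func() before any test runs, so mod.py (two
# statements) should be reported as fully covered, which is what CONF_RESULT matches.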
def test_cover_conftest(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cover_looponfail(testdir, monkeypatch):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
def mock_run(*args, **kwargs):
return _TestProcess(*map(str, args))
monkeypatch.setattr(testdir, 'run', mock_run)
assert testdir.run is mock_run
if hasattr(testdir, '_pytester'):
monkeypatch.setattr(testdir._pytester, 'run', mock_run)
assert testdir._pytester.run is mock_run
with testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--looponfail',
script) as process:
with dump_on_error(process.read):
wait_for_strings(
process.read,
30, # 30 seconds
'Stmts Miss Cover'
)
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cover_conftest_dist(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
def test_no_cover_marker(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import pytest
import mod
import subprocess
import sys
@pytest.mark.no_cover
def test_basic():
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
def test_no_cover_fixture(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import mod
import subprocess
import sys
def test_basic(no_cover):
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
COVERAGERC = '''
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
raise NotImplementedError
'''
EXCLUDED_TEST = '''
def func():
raise NotImplementedError
def test_basic():
x = True
assert x
'''
EXCLUDED_RESULT = '4 * 100%*'
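# With "raise NotImplementedError" excluded by COVERAGERC, the report counts the four
# remaining statements of EXCLUDED_TEST, all of which execute, hence the '4 * 100%*' pattern.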
def test_coveragerc(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['test_coveragerc* %s' % EXCLUDED_RESULT])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_coveragerc_dist(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '2',
max_worker_restart_0,
script)
assert result.ret == 0
result.stdout.fnmatch_lines(
['test_coveragerc_dist* %s' % EXCLUDED_RESULT])
SKIP_COVERED_COVERAGERC = '''
[report]
skip_covered = True
'''
SKIP_COVERED_TEST = '''
def func():
return "full coverage"
def test_basic():
assert func() == "full coverage"
'''
SKIP_COVERED_RESULT = '1 file skipped due to complete coverage.'
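# skip_covered = True makes coverage.py omit fully covered files from the report table
# and print a summary line instead; SKIP_COVERED_RESULT matches that summary line.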
@pytest.mark.parametrize('report_option', [
'term-missing:skip-covered',
'term:skip-covered'])
def test_skip_covered_cli(testdir, report_option):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=%s' % report_option,
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
def test_skip_covered_coveragerc_config(testdir):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
CLEAR_ENVIRON_TEST = '''
import os
def test_basic():
os.environ.clear()
'''
def test_clear_environ(testdir):
script = testdir.makepyfile(CLEAR_ENVIRON_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
SCRIPT_SIMPLE = '''
def test_foo():
assert 1 == 1
x = True
assert x
'''
SCRIPT_SIMPLE_RESULT = '4 * 100%'
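# SCRIPT_SIMPLE contains four executable statements, all hit by the test, so a fully
# covered run matches '4 * 100%'.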
@pytest.mark.skipif('sys.platform == "win32"')
def test_dist_boxed(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--boxed',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_boxed* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"')
@pytest.mark.skipif('sys.version_info[0] > 2 and platform.python_implementation() == "PyPy"',
reason="strange optimization on PyPy3")
def test_dist_bare_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov',
'-n', '1',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_bare_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_not_started_plugin_does_not_fail(testdir):
class ns:
cov_source = [True]
cov_report = ''
plugin = pytest_cov.plugin.CovPlugin(ns, None, start=False)
plugin.pytest_runtestloop(None)
plugin.pytest_terminal_summary(None)
def test_default_output_setting(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*coverage*'
])
assert result.ret == 0
def test_disabled_output(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
script)
stdout = result.stdout.str()
# We don't want the path to the executable to fail the test if we happen
# to put the project in a directory with "coverage" in it.
stdout = stdout.replace(sys.executable, "<SYS.EXECUTABLE>")
assert 'coverage' not in stdout
assert result.ret == 0
def test_coverage_file(testdir):
script = testdir.makepyfile(SCRIPT)
data_file_name = 'covdata'
os.environ['COVERAGE_FILE'] = data_file_name
try:
result = testdir.runpytest('-v', '--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
data_file = testdir.tmpdir.join(data_file_name)
assert data_file.check()
finally:
os.environ.pop('COVERAGE_FILE')
def test_external_data_file(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_external_data_file_xdist(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
parallel = true
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'-n', '1',
max_worker_restart_0,
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_xdist_no_data_collected(testdir):
testdir.makepyfile(target="x = 123")
script = testdir.makepyfile("""
import target
def test_foobar():
assert target.x == 123
""")
result = testdir.runpytest('-v',
'--cov=target',
'-n', '1',
script)
assert 'no-data-collected' not in result.stderr.str()
assert 'no-data-collected' not in result.stdout.str()
assert 'module-not-imported' not in result.stderr.str()
assert 'module-not-imported' not in result.stdout.str()
assert result.ret == 0
def test_external_data_file_negative(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('.coverage*')))
@xdist_params
def test_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov-append',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
'test_2* %s*' % prop.result2,
])
@xdist_params
def test_do_not_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* 0%',
'test_2* %s*' % prop.result2,
])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_append_coverage_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-append',
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
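# pytest-cov ships a .pth file that runs at interpreter startup in every new Python
# process and calls pytest_cov.embed.init() when COV_CORE_SOURCE is set in the
# environment. test_pth_failure below executes that payload with init() patched to
# raise and checks the warning written to stderr.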
def test_pth_failure(monkeypatch):
with open('src/pytest-cov.pth') as fh:
payload = fh.read()
class SpecificError(Exception):
pass
def bad_init():
raise SpecificError()
buff = StringIO()
from pytest_cov import embed
monkeypatch.setattr(embed, 'init', bad_init)
monkeypatch.setattr(sys, 'stderr', buff)
monkeypatch.setitem(os.environ, 'COV_CORE_SOURCE', 'foobar')
exec_(payload)
assert buff.getvalue() == '''pytest-cov: Failed to setup subprocess coverage. Environ: {'COV_CORE_SOURCE': 'foobar'} Exception: SpecificError()
'''
def test_double_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov', '--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_double_cov2(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov', '--cov',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov2* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cov_and_no_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov', '--no-cov',
'-n', '1',
'-s',
script)
assert 'Coverage disabled via --no-cov switch!' not in result.stdout.str()
assert 'Coverage disabled via --no-cov switch!' not in result.stderr.str()
assert result.ret == 0
def find_labels(text, pattern):
all_labels = collections.defaultdict(set)
lines = text.splitlines()
for lineno, line in enumerate(lines, start=1):
labels = re.findall(pattern, line)
for label in labels:
all_labels[label].add(lineno)
return all_labels
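# A minimal usage sketch for find_labels (illustrative input, not taken from this
# test suite). It returns a defaultdict mapping each matched label to the set of
# 1-based line numbers on which it appears:
#
#     find_labels("x = 1  # r1\ny = 2  # r1 r2", r"r\d+")
#     # -> {'r1': {1, 2}, 'r2': {2}}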
# The contexts and their labels in contextful.py
EXPECTED_CONTEXTS = {
'': 'c0',
'test_contexts.py::test_01|run': 'r1',
'test_contexts.py::test_02|run': 'r2',
'test_contexts.py::OldStyleTests::test_03|setup': 's3',
'test_contexts.py::OldStyleTests::test_03|run': 'r3',
'test_contexts.py::OldStyleTests::test_04|run': 'r4',
'test_contexts.py::OldStyleTests::test_04|teardown': 't4',
'test_contexts.py::test_05|setup': 's5',
'test_contexts.py::test_05|run': 'r5',
'test_contexts.py::test_06|setup': 's6',
'test_contexts.py::test_06|run': 'r6',
'test_contexts.py::test_07|setup': 's7',
'test_contexts.py::test_07|run': 'r7',
'test_contexts.py::test_08|run': 'r8',
'test_contexts.py::test_09[1]|setup': 's9-1',
'test_contexts.py::test_09[1]|run': 'r9-1',
'test_contexts.py::test_09[2]|setup': 's9-2',
'test_contexts.py::test_09[2]|run': 'r9-2',
'test_contexts.py::test_09[3]|setup': 's9-3',
'test_contexts.py::test_09[3]|run': 'r9-3',
'test_contexts.py::test_10|run': 'r10',
'test_contexts.py::test_11[1-101]|run': 'r11-1',
'test_contexts.py::test_11[2-202]|run': 'r11-2',
'test_contexts.py::test_12[one]|run': 'r12-1',
'test_contexts.py::test_12[two]|run': 'r12-2',
'test_contexts.py::test_13[3-1]|run': 'r13-1',
'test_contexts.py::test_13[3-2]|run': 'r13-2',
'test_contexts.py::test_13[4-1]|run': 'r13-3',
'test_contexts.py::test_13[4-2]|run': 'r13-4',
}
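# The keys above follow pytest-cov's dynamic context naming for --cov-context=test:
# "<test nodeid>|<phase>" with phase being setup, run or teardown. The values are the
# short labels planted as comments in contextful.py, so find_labels() can recover the
# expected covered lines for each context.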
@pytest.mark.skipif("coverage.version_info < (5, 0)")
@xdist_params
def test_contexts(testdir, opts):
with open(os.path.join(os.path.dirname(__file__), "contextful.py")) as f:
contextful_tests = f.read()
script = testdir.makepyfile(contextful_tests)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-context=test',
script,
*opts.split()
)
assert result.ret == 0
result.stdout.fnmatch_lines([
'test_contexts* 100%*',
])
data = coverage.CoverageData(".coverage")
data.read()
assert data.measured_contexts() == set(EXPECTED_CONTEXTS)
measured = data.measured_files()
assert len(measured) == 1
test_context_path = list(measured)[0]
assert test_context_path.lower() == os.path.abspath("test_contexts.py").lower()
line_data = find_labels(contextful_tests, r"[crst]\d+(?:-\d+)?")
for context, label in EXPECTED_CONTEXTS.items():
if context == '':
continue
data.set_query_context(context)
actual = set(data.lines(test_context_path))
assert line_data[label] == actual, "Wrong lines for context {!r}".format(context)
@pytest.mark.skipif("coverage.version_info >= (5, 0)")
def test_contexts_not_supported(testdir):
script = testdir.makepyfile("a = 1")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-context=test',
script,
)
result.stderr.fnmatch_lines([
'*argument --cov-context: Contexts are only supported with coverage.py >= 5.x',
])
assert result.ret != 0
def test_issue_417(testdir):
# https://github.com/pytest-dev/pytest-cov/issues/417
whatever = testdir.maketxtfile(whatever="")
testdir.inline_genitems(whatever)
| [] | [] | ["COVERAGE_FILE", "PYTHONPATH"] | [] | ["COVERAGE_FILE", "PYTHONPATH"] | python | 2 | 0 | |
src/main/java/com/stalkedbythestate/sbts/updateavailable/Main.java
|
package com.stalkedbythestate.sbts.updateavailable;
// Copyright (c) 2021 Kim Hendrikse
import javax.net.ssl.HttpsURLConnection;
import java.io.*;
import java.net.*;
public class Main {
public static void main(String[] args) {
Main main = new Main();
if (args.length != 1)
System.exit(1);
String version = args[0];
main.check(version);
}
private void check(String version) {
System.setProperty("javax.net.ssl.keyStore", System.getenv("HOME")
+ "/update/" + "cacerts" + "/" + "keystore.jks");
System.setProperty("javax.net.ssl.keyStorePassword", "n1txo0rdBurrN");
System.setProperty("javax.net.ssl.trustStore", System.getenv("HOME")
+ "/update/" + "certs" + "/" + "truststore.jks");
System.setProperty("javax.net.ssl.trustStorePassword", "n1txo0rdBurrN");
URL url = null;
String urlString = "https://update.stalkedbythestate.com:8445/check/updatesavail";
try {
url = new URL(urlString);
} catch (MalformedURLException e) {
System.exit(1);
}
HttpsURLConnection httpsConn = null;
URLConnection conn = null;
InputStream in = null;
try {
conn = url.openConnection();
httpsConn = (HttpsURLConnection) conn;
conn.setUseCaches(false);
conn.setReadTimeout(30000);
conn.setRequestProperty("Content-Type",
"application/x-www-form-urlencoded");
try {
httpsConn.setRequestMethod("GET");
} catch (ProtocolException e1) {
System.exit(1);
}
StringBuffer sb = new StringBuffer();
sb.append("version=");
try {
sb.append(URLEncoder.encode(version, "UTF-8"));
} catch (UnsupportedEncodingException e) {
System.exit(1);
}
String outputString = sb.toString();
conn.setRequestProperty("Content-Length",
"" + Integer.toString(outputString.getBytes().length));
conn.setDoOutput(true);
DataOutputStream dataOutputStream = new DataOutputStream(
conn.getOutputStream());
dataOutputStream.writeBytes(outputString);
conn.getOutputStream().close();
// Consume and output input
in = conn.getInputStream();
byte[] buffer = new byte[1024];
int bytesRead = 0;
OutputStream out = System.out;
while ((bytesRead = conn.getInputStream().read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
}
} catch (IOException e) {
System.exit(1);
} finally {
try {
if (in != null)
in.close();
} catch (IOException e) {
}
}
System.exit(0);
}
}
|
["\"HOME\"", "\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | java | 1 | 0 | |
pytorch_lightning/core/lightning.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import os
import re
import tempfile
from abc import ABC, abstractmethod
from argparse import Namespace
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
import torch.distributed as torch_distrib
from torch import Tensor
from torch.nn import Module
from torch.nn.parallel import DistributedDataParallel
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from pytorch_lightning import _logger as log
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES, ModelIO
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args
from pytorch_lightning.core.step_result import TrainResult, EvalResult
try:
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
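# torch_xla is optional: XLA_AVAILABLE simply records whether the import succeeded so
# TPU-specific code paths can be guarded.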
class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, ModelHooks, Module):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.exp_save_path = None
#: The current epoch
self.current_epoch = 0
#: Total training batches seen across all epochs
self.global_step = 0
self.loaded_optimizer_states_dict = {}
#: Pointer to the trainer object
self.trainer = None
#: Pointer to the logger object
self.logger = None
#: True if using dp
self.use_dp = False
#: True if using ddp
self.use_ddp = False
#: True if using ddp2
self.use_ddp2 = False
# True if on tpu
self.use_tpu = False
#: True if using amp
self.use_amp = False
#: The precision used
self.precision = 32
# optionally can be set by user
self._example_input_array = None
self._datamodule = None
@property
def example_input_array(self) -> Any:
return self._example_input_array
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def datamodule(self) -> Any:
return self._datamodule
@datamodule.setter
def datamodule(self, datamodule: Any) -> None:
self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == 'cuda'
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. Will be passed to Python's built-in print function.
**kwargs: Will be passed to Python's built-in print function.
Example:
.. code-block:: python
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
print(*args, **kwargs)
def forward(self, *args, **kwargs):
r"""
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
Normally you'd call ``self()`` from your :meth:`training_step` method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
when using the module outside Lightning in a production setting.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Predicted output
Examples:
.. code-block:: python
# example if we were using this model as a feature extractor
def forward(self, x):
feature_maps = self.convnet(x)
return feature_maps
def training_step(self, batch, batch_idx):
x, y = batch
feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
return loss
# splitting it this way allows the model to be used as a feature extractor
model = MyModelAbove()
inputs = server.get_request()
results = model(inputs)
server.write_results(results)
# -------------
# This is in stark contrast to torch.nn.Module where normally you would have this:
def forward(self, batch):
x, y = batch
feature_maps = self.convnet(x)
logits = self.classifier(feature_maps)
return logits
"""
def training_step(self, *args, **kwargs):
r"""
Here you compute and return the training loss and some additional metrics for,
e.g., the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
Return:
:class:`~pytorch_lightning.core.step_result.TrainResult`
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
functions for logging, distributed sync and error checking.
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
# implement your own
out = self(x)
loss = self.loss(out, x)
# TrainResult auto-detaches the loss after the optimization steps are complete
result = pl.TrainResult(minimize=loss)
The return object :class:`~pytorch_lightning.core.step_result.TrainResult` controls where to log,
when to log (step or epoch) and syncing with multiple GPUs.
.. code-block:: python
# log to progress bar and logger
result.log('train_loss', loss, prog_bar=True, logger=True)
# sync metric value across GPUs in distributed training
result.log('train_loss_2', loss, sync_dist=True)
# log to progress bar as well
result.log('train_loss_2', loss, prog_bar=True)
# assign arbitrary values
result.predictions = predictions
result.some_value = 'some_value'
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
# TrainResult auto-detaches hiddens
result = pl.TrainResult(minimize=loss, hiddens=hiddens)
return result
Notes:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn('`training_step` must be implemented to be used with the Lightning Trainer')
def training_step_end(self, *args, **kwargs):
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
:class:`~pytorch_lightning.core.step_result.TrainResult`
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
functions for logging, distributed sync and error checking.
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return pl.TrainResult(loss)
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
result = pl.TrainResult()
result.out = out
def training_step_end(self, training_step_outputs):
# this out is now the full size of the batch
all_outs = training_step_outputs.out
# this softmax now uses the full batch
loss = nce_loss(all_outs)
result = pl.TrainResult(loss)
return result
See Also:
See the :ref:`multi-gpu-training` guide for more details.
"""
def training_epoch_end(
self, outputs: Union[TrainResult, List[TrainResult]]
):
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
:class:`~pytorch_lightning.core.step_result.TrainResult`
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
functions for logging, distributed sync and error checking.
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, outputs):
epoch_result = pl.TrainResult()
for train_result in outputs:
all_losses = train_result.minimize
# do something with all losses
return results
"""
def validation_step(self, *args, **kwargs) -> EvalResult:
r"""
Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val datasets used)
Return:
:class:`~pytorch_lightning.core.step_result.TrainResult`
.. code-block:: python
# pseudocode of order
out = validation_step()
if defined('validation_step_end'):
out = validation_step_end(out)
out = validation_epoch_end(out)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
result = pl.EvalResult(checkpoint_on=loss)
result.log_dict({'val_loss': loss, 'val_acc': val_acc})
return result
If you pass in multiple val datasets, validation_step will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation datasets
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs) -> EvalResult:
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
:class:`~pytorch_lightning.core.step_result.EvalResult`
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
loss = nce_loss(loss)
result = pl.EvalResult()
result.log('val_loss', loss)
return result
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
result = pl.EvalResult()
result.out = out
return result
def validation_epoch_end(self, output_results):
# this out is now the full size of the batch
all_val_step_outs = output_results.out
loss = nce_loss(all_val_step_outs)
result = pl.EvalResult(checkpoint_on=loss)
result.log('val_loss', loss)
return result
See Also:
See the :ref:`multi-gpu-training` guide for more details.
"""
def validation_end(self, outputs):
"""
Warnings:
Deprecated in v0.7.0. Use :meth:`validation_epoch_end` instead.
Will be removed in 1.0.0.
"""
def validation_epoch_end(
self, outputs: Union[EvalResult, List[EvalResult]]
) -> EvalResult:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
:class:`~pytorch_lightning.core.step_result.EvalResult`
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
- The outputs here are strictly for logging or progress bar.
- If you don't need to display anything, don't return anything.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
# do something with the outputs of all val batches
all_val_preds = val_step_outputs.predictions
val_step_outputs.some_result = calc_all_results(all_val_preds)
return val_step_outputs
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
result = pl.EvalResult()
result.log('final_metric', final_value)
return result
"""
def test_step(self, *args, **kwargs) -> EvalResult:
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test datasets used).
Return:
:class:`~pytorch_lightning.core.step_result.EvalResult`
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
result = pl.EvalResult(checkpoint_on=loss)
result.log_dict({'test_loss': loss, 'test_acc': test_acc})
return result
If you pass in multiple test datasets, :meth:`test_step` will have an additional
argument.
.. code-block:: python
# CASE 2: multiple test datasets
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to test, you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs) -> EvalResult:
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
:class:`~pytorch_lightning.core.step_result.EvalResult`
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
loss = nce_loss(loss)
result = pl.EvalResult()
result.log('test_loss', loss)
return result
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
result = pl.EvalResult()
result.out = out
return result
def test_epoch_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
result = pl.EvalResult(checkpoint_on=loss)
result.log('test_loss', loss)
return result
See Also:
See the :ref:`multi-gpu-training` guide for more details.
"""
def test_end(self, outputs):
"""
Warnings:
Deprecated in v0.7.0. Use :meth:`test_epoch_end` instead.
Will be removed in 1.0.0.
"""
def test_epoch_end(
self, outputs: Union[EvalResult, List[EvalResult]]
) -> EvalResult:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step` (or :meth:`test_step_end` if defined), or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
:class:`~pytorch_lightning.core.step_result.EvalResult`
Note:
If you didn't define a :meth:`test_step`, this won't be called.
- The outputs here are strictly for logging or progress bar.
- If you don't need to display anything, don't return anything.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, test_step_outputs):
# do something with the outputs of all test batches
all_test_preds = test_step_outputs.predictions
test_step_outputs.some_result = calc_all_results(all_test_preds)
return test_step_outputs
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
result = pl.EvalResult()
result.log('final_metric', final_value)
return result
"""
def configure_ddp(self, model: 'LightningModule', device_ids: List[int]) -> DistributedDataParallel:
r"""
Override to init DDP in your own way or with your own wrapper.
The only requirements are that:
1. On a validation batch, the call goes to ``model.validation_step``.
2. On a training batch, the call goes to ``model.training_step``.
3. On a testing batch, the call goes to ``model.test_step``.
Args:
model: the :class:`LightningModule` currently being optimized.
device_ids: the list of GPU ids.
Return:
DDP wrapped model
Examples:
.. code-block:: python
# default implementation used in Trainer
def configure_ddp(self, model, device_ids):
# Lightning DDP simply routes to test_step, val_step, etc...
model = LightningDistributedDataParallel(
model,
device_ids=device_ids,
find_unused_parameters=True
)
return model
"""
model = LightningDistributedDataParallel(model, device_ids=device_ids, find_unused_parameters=True)
return model
def _init_slurm_connection(self) -> None:
""""""
"""
Sets up the environment variables necessary for PyTorch distributed communication,
based on the SLURM environment.
"""
# use slurm job id for the port number
# guarantees unique ports across jobs from same grid search
try:
# use the last 4 numbers in the job id as the id
default_port = os.environ['SLURM_JOB_ID']
default_port = default_port[-4:]
# all ports should be in the 10k+ range
default_port = int(default_port) + 15000
except Exception:
default_port = 12910
# if user gave a port number, use that one instead
try:
default_port = os.environ['MASTER_PORT']
except Exception:
os.environ['MASTER_PORT'] = str(default_port)
# figure out the root node addr
try:
root_node = os.environ['SLURM_NODELIST'].split(' ')[0]
except Exception:
root_node = '127.0.0.1'
root_node = self.trainer.resolve_root_node_address(root_node)
os.environ['MASTER_ADDR'] = root_node
def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True) -> None:
"""
Override to define your custom way of setting up a distributed environment.
Lightning's implementation uses env:// init by default and sets the first node as root
for SLURM-managed clusters.
Args:
global_rank: The global process idx.
world_size: Number of GPUs being used across all nodes (num_nodes * num_gpus).
is_slurm_managing_tasks: is cluster managed by SLURM.
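Example (a minimal sketch of a custom override; the file-based rendezvous path below is an
illustrative assumption, not a Lightning default):
.. code-block:: python
    def init_ddp_connection(self, global_rank, world_size, is_slurm_managing_tasks=True):
        # swap the default env:// rendezvous for a file on a shared filesystem
        torch.distributed.init_process_group(
            "nccl" if torch.cuda.is_available() else "gloo",
            init_method="file:///tmp/ddp_init_file",  # hypothetical shared path
            rank=global_rank,
            world_size=world_size,
        )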
"""
if is_slurm_managing_tasks:
self._init_slurm_connection()
if 'MASTER_ADDR' not in os.environ:
rank_zero_warn("MASTER_ADDR environment variable is not defined. Set as localhost")
os.environ['MASTER_ADDR'] = '127.0.0.1'
log.debug(f"MASTER_ADDR: {os.environ['MASTER_ADDR']}")
if 'MASTER_PORT' not in os.environ:
rank_zero_warn("MASTER_PORT environment variable is not defined. Set as 12910")
os.environ['MASTER_PORT'] = '12910'
log.debug(f"MASTER_PORT: {os.environ['MASTER_PORT']}")
if 'WORLD_SIZE' in os.environ and int(os.environ['WORLD_SIZE']) != world_size:
rank_zero_warn(
f"WORLD_SIZE environment variable ({os.environ['WORLD_SIZE']}) "
f"is not equal to the computed world size ({world_size}). Ignored."
)
torch_backend = "nccl" if self.trainer.on_gpu else "gloo"
log.info(f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank+1}/{world_size}")
torch_distrib.init_process_group(torch_backend, rank=global_rank, world_size=world_size)
def configure_sync_batchnorm(self, model: 'LightningModule') -> 'LightningModule':
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
Override to synchronize batchnorm between specific process groups instead
of the whole world or use a different sync_bn like `apex`'s version.
Args:
model: pointer to current :class:`LightningModule`.
Return:
LightningModule with batchnorm layers synchronized between process groups
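Example (a minimal sketch of a custom override; the two-rank process group is an
illustrative assumption):
.. code-block:: python
    def configure_sync_batchnorm(self, model):
        # sync batchnorm statistics only within a dedicated group of ranks
        group = torch.distributed.new_group(ranks=[0, 1])
        return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=group)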
"""
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=None)
return model
def configure_apex(
self, amp: object, model: 'LightningModule', optimizers: List[Optimizer], amp_level: str
) -> Tuple['LightningModule', List[Optimizer]]:
r"""
Override to init AMP your own way.
Must return a model and list of optimizers.
Args:
amp: pointer to amp library object.
model: pointer to current :class:`LightningModule`.
optimizers: list of optimizers passed in :meth:`configure_optimizers`.
amp_level: AMP mode chosen ('O1', 'O2', etc...)
Return:
Apex wrapped model and optimizers
Examples:
.. code-block:: python
# Default implementation used by Trainer.
def configure_apex(self, amp, model, optimizers, amp_level):
model, optimizers = amp.initialize(
model, optimizers, opt_level=amp_level,
)
return model, optimizers
"""
model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level)
return model, optimizers
def configure_optimizers(
self,
) -> Optional[Union[Optimizer, Sequence[Optimizer], Dict, Sequence[Dict], Tuple[List, List]]]:
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler' key whose value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
Note:
The 'frequency' value is an int corresponding to the number of sequential batches
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
The lr_dict is a dictionary which contains scheduler and its associated configuration.
It has five keys. The default configuration is shown below.
.. code-block:: python
{
'scheduler': lr_scheduler, # The LR scheduler
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss' # Metric to monitor
}
If the user only provides LR schedulers, their configuration will be set to the defaults shown above.
Examples:
.. code-block:: python
# most cases
def configure_optimizers(self):
opt = Adam(self.parameters(), lr=1e-3)
return opt
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
return generator_opt, discriminator_opt
# example with learning rate schedulers
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
return [generator_opt, discriminator_opt], [discriminator_sched]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
dis_sched = CosineAnnealing(dis_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sched, dis_sched]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically
handle the optimizers for you.
- If you use multiple optimizers, :meth:`training_step` will have an additional
``optimizer_idx`` parameter.
- If you use LBFGS, Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only
for the parameters of the current optimizer at each training step.
- If you need to control how often those optimizers step or override the
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
or want to monitor a custom metric, you can specify these in a lr_dict:
.. code-block:: python
{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn('`configure_optimizers` must be implemented to be used with the Lightning Trainer')
def optimizer_step(
self,
epoch: int,
batch_idx: int,
optimizer: Optimizer,
optimizer_idx: int,
second_order_closure: Optional[Callable] = None,
on_tpu: bool = False,
using_native_amp: bool = False,
using_lbfgs: bool = False,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
second_order_closure: closure for second order methods
on_tpu: True if TPU backward is required
using_native_amp: True if using native amp
using_lbfgs: True if the matching optimizer is lbfgs
Examples:
.. code-block:: python
# DEFAULT
def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
second_order_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step()
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
second_order_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every 2 steps
if optimizer_idx == 0:
if batch_idx % 2 == 0 :
optimizer.step()
optimizer.zero_grad()
# update discriminator opt every 4 steps
if optimizer_idx == 1:
if batch_idx % 4 == 0 :
optimizer.step()
optimizer.zero_grad()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, current_epoch, batch_idx, optimizer,
optimizer_idx, second_order_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step()
optimizer.zero_grad()
Note:
If you also override the :meth:`~pytorch_lightning.core.hooks.ModelHooks.on_before_zero_grad`
model hook don't forget to add the call to it before ``optimizer.zero_grad()`` yourself.
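For example, a minimal sketch of a custom override that keeps that hook call:
.. code-block:: python
    def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                       second_order_closure, on_tpu, using_native_amp, using_lbfgs):
        optimizer.step()
        self.on_before_zero_grad(optimizer)  # keep the hook before zeroing gradients
        optimizer.zero_grad()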
"""
if on_tpu:
xm.optimizer_step(optimizer)
elif using_native_amp:
self.trainer.scaler.step(optimizer)
elif using_lbfgs:
optimizer.step(second_order_closure)
else:
optimizer.step()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
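"""Override this to change how Lightning zeroes gradients; the default implementation calls ``optimizer.zero_grad()`` for the given optimizer."""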
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples:
.. code-block:: python
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(x == time_dims[0] for x in time_dims), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t : t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t : t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def prepare_data(self) -> None:
"""
Use this to download and prepare data.
.. warning:: DO NOT assign state to the model (use ``setup`` instead),
since this is NOT called on every GPU in DDP/TPU.
Example::
def prepare_data(self):
# good
download_data()
tokenize()
etc()
# bad
self.split = data_split
self.some_state = some_other_state()
In DDP, ``prepare_data`` can be called in two ways (selected via ``Trainer(prepare_data_per_node=...)``):
1. Once per node. This is the default and is only called on LOCAL_RANK=0.
2. Once in total. Only called on GLOBAL_RANK=0.
Example::
# DEFAULT
# called once per node on LOCAL_RANK=0 of that node
Trainer(prepare_data_per_node=True)
# call on GLOBAL_RANK=0 (great for shared file systems)
Trainer(prepare_data_per_node=False)
This is called before requesting the dataloaders:
.. code-block:: python
model.prepare_data()
if ddp/tpu: init()
model.setup(stage)
model.train_dataloader()
model.val_dataloader()
model.test_dataloader()
"""
def train_dataloader(self) -> DataLoader:
"""
Implement a PyTorch DataLoader for training.
Return:
Single PyTorch :class:`~torch.utils.data.DataLoader`.
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`setup`
- :meth:`train_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Example:
.. code-block:: python
def train_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=True, transform=transform,
download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=True
)
return loader
"""
rank_zero_warn('`train_dataloader` must be implemented to be used with the Lightning Trainer')
def tng_dataloader(self): # todo: remove in v1.0.0
"""
Warnings:
Deprecated in v0.5.0. Use :meth:`train_dataloader` instead. Will be removed in 1.0.0.
"""
output = self.train_dataloader()
rank_zero_warn(
"`tng_dataloader` has been renamed to `train_dataloader` since v0.5.0."
" and this method will be removed in v1.0.0",
DeprecationWarning,
)
return output
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
r"""
Implement one or multiple PyTorch DataLoaders for testing.
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`setup`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Return:
Single or multiple PyTorch DataLoaders.
Example:
.. code-block:: python
def test_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=False, transform=transform,
download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False
)
return loader
# can also return multiple dataloaders
def test_dataloader(self):
return [loader_a, loader_b, ..., loader_n]
Note:
If you don't need a test dataset and a :meth:`test_step`, you don't need to implement
this method.
Note:
In the case where you return multiple test dataloaders, the :meth:`test_step`
will have an argument ``dataloader_idx`` which matches the order here.
"""
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
r"""
Implement one or multiple PyTorch DataLoaders for validation.
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Return:
Single or multiple PyTorch DataLoaders.
Examples:
.. code-block:: python
def val_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=False,
transform=transform, download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False
)
return loader
# can also return multiple dataloaders
def val_dataloader(self):
return [loader_a, loader_b, ..., loader_n]
Note:
If you don't need a validation dataset and a :meth:`validation_step`, you don't need to
implement this method.
Note:
In the case where you return multiple validation dataloaders, the :meth:`validation_step`
will have an argument ``dataloader_idx`` which matches the order here.
"""
def summarize(self, mode: str = ModelSummary.MODE_DEFAULT) -> ModelSummary:
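"""Log and return a ``ModelSummary`` of this module; ``mode`` controls how detailed the summary is."""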
model_summary = ModelSummary(self, mode=mode)
log.info('\n' + str(model_summary))
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example:
.. code-block:: python
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
r"""
Called by Lightning to restore your model.
If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this.
Args:
checkpoint: Loaded checkpoint
Example:
.. code-block:: python
def on_load_checkpoint(self, checkpoint):
# 99% of the time you don't need to implement this method
self.something_cool_i_want_to_save = checkpoint['something_cool_i_want_to_save']
Note:
Lightning auto-restores global step, epoch, and train state including amp scaling.
There is no need for you to restore anything regarding training.
"""
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
r"""
Called by Lightning when saving a checkpoint to give you a chance to store anything
else you might want to save.
Args:
checkpoint: Checkpoint to be saved
Example:
.. code-block:: python
def on_save_checkpoint(self, checkpoint):
# 99% of use cases you don't need to implement this method
checkpoint['something_cool_i_want_to_save'] = my_cool_pickable_object
Note:
Lightning saves all aspects of training (epoch, global step, etc...)
including amp scaling.
There is no need for you to store anything about training.
"""
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.running_loss.mean()
avg_training_loss = running_train_loss.cpu().item() if running_train_loss is not None else float('NaN')
tqdm_dict = {'loss': '{:.3f}'.format(avg_training_loss)}
if self.trainer.truncated_bptt_steps is not None:
tqdm_dict['split_idx'] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict['v_num'] = version
return tqdm_dict
def get_tqdm_dict(self) -> Dict[str, Union[int, str]]:
"""
Additional items to be displayed in the progress bar.
Return:
Dictionary with the items to be displayed in the progress bar.
Warning:
Deprecated since v0.7.3.
Use :meth:`get_progress_bar_dict` instead.
"""
rank_zero_warn(
"`get_tqdm_dict` was renamed to `get_progress_bar_dict` in v0.7.3"
" and this method will be removed in v1.0.0",
DeprecationWarning,
)
return self.get_progress_bar_dict()
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
""""""
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set module_arguments in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
def save_hyperparameters(self, *args, frame=None) -> None:
"""Save all model arguments.
Args:
args: single object of ``dict``, ``Namespace`` or ``OmegaConf``,
or string names of arguments from the class ``__init__``.
>>> from collections import OrderedDict
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # manually assign arguments
... self.save_hyperparameters('arg1', 'arg3')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
>>> class AutomaticArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # equivalent automatic
... self.save_hyperparameters()
... def forward(self, *args, **kwargs):
... ...
>>> model = AutomaticArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg2": abc
"arg3": 3.14
>>> class SingleArgModel(LightningModule):
... def __init__(self, params):
... super().__init__()
... # manually assign single argument
... self.save_hyperparameters(params)
... def forward(self, *args, **kwargs):
... ...
>>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
>>> model.hparams
"p1": 1
"p2": abc
"p3": 3.14
"""
if not frame:
frame = inspect.currentframe().f_back
init_args = get_init_args(frame)
assert init_args, 'failed to inspect the self init'
if not args:
hp = init_args
self._hparams_name = 'kwargs' if hp else None
else:
isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
if len(isx_non_str) == 1:
hp = args[isx_non_str[0]]
cand_names = [k for k, v in init_args.items() if v == hp]
self._hparams_name = cand_names[0] if cand_names else None
else:
hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
self._hparams_name = 'kwargs'
# `hparams` are expected here
if hp:
self._set_hparams(hp)
def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
if isinstance(hp, Namespace):
hp = vars(hp)
if isinstance(hp, dict):
hp = AttributeDict(hp)
elif isinstance(hp, PRIMITIVE_TYPES):
raise ValueError(f'Primitives {PRIMITIVE_TYPES} are not allowed.')
elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
raise ValueError(f'Unsupported config type of {type(hp)}.')
if isinstance(hp, dict) and isinstance(self.hparams, dict):
self.hparams.update(hp)
else:
self._hparams = hp
def to_onnx(self, file_path: str, input_sample: Optional[Tensor] = None, **kwargs):
"""Saves the model in ONNX format
Args:
file_path: The path of the file the model should be saved to.
input_sample: A sample of an input tensor for tracing.
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
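A possible follow-up (not part of Lightning; assumes the optional ``onnxruntime``
package is installed) is to run the exported file:
.. code-block:: python
    import numpy as np
    import onnxruntime
    sess = onnxruntime.InferenceSession(tmpfile.name)
    input_name = sess.get_inputs()[0].name
    preds = sess.run(None, {input_name: np.random.randn(1, 64).astype(np.float32)})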
"""
if isinstance(input_sample, Tensor):
input_data = input_sample
elif self.example_input_array is not None:
input_data = self.example_input_array
else:
if input_sample is not None:
raise ValueError(f'Received `input_sample` of type {type(input_sample)}. Expected type is `Tensor`')
else:
raise ValueError('Could not export to ONNX since neither `input_sample` nor'
' `model.example_input_array` attribute is set.')
input_data = input_data.to(self.device)
if 'example_outputs' not in kwargs:
self.eval()
with torch.no_grad():
kwargs['example_outputs'] = self(input_data)
torch.onnx.export(self, input_data, file_path, **kwargs)
@property
def hparams(self) -> Union[AttributeDict, str]:
if not hasattr(self, '_hparams'):
self._hparams = AttributeDict()
return self._hparams
@hparams.setter
def hparams(self, hp: Union[dict, Namespace, Any]):
hparams_assignment_name = self.__get_hparams_assignment_variable()
self._hparams_name = hparams_assignment_name
self._set_hparams(hp)
def __get_hparams_assignment_variable(self):
""""""
"""
Looks at the source of the class to figure out what name the user assigned to ``self.hparams``.
This only happens when the user explicitly sets ``self.hparams``.
"""
try:
class_code = inspect.getsource(self.__class__)
lines = class_code.split('\n')
for line in lines:
line = re.sub(r"\s+", "", line, flags=re.UNICODE)
if '.hparams=' in line:
return line.split('=')[1]
except Exception as e:
return 'hparams'
return None
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT",
"SLURM_JOB_ID",
"SLURM_NODELIST",
"WORLD_SIZE"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT", "SLURM_JOB_ID", "SLURM_NODELIST", "WORLD_SIZE"]
|
python
| 5 | 0 | |
service_runner/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
from service_runner.service_runner.settings import CURRENT_DIR, BASE_DIR
import os
import sys
import shutil
def main():
if not os.path.exists(CURRENT_DIR + '/log'):
os.makedirs(CURRENT_DIR + '/log')
if not os.path.exists(CURRENT_DIR + '/media'):
os.makedirs(CURRENT_DIR + '/media')
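# Prefer a user-provided custom_settings.py next to the install directory; otherwise
# seed one from the bundled template, then use it as the Django settings module.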
if os.path.exists(CURRENT_DIR + '/custom_settings.py'):
sys.path.append(CURRENT_DIR)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'custom_settings')
else:
shutil.copyfile(BASE_DIR + '/custom_settings_tmp.py',
CURRENT_DIR + '/custom_settings.py')
sys.path.append(CURRENT_DIR)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'custom_settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
core/src/main/java/hudson/lifecycle/WindowsServiceLifecycle.java
|
/*
* The MIT License
*
* Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.lifecycle;
import static hudson.util.jna.Kernel32.MOVEFILE_DELAY_UNTIL_REBOOT;
import static hudson.util.jna.Kernel32.MOVEFILE_REPLACE_EXISTING;
import hudson.FilePath;
import hudson.Launcher.LocalLauncher;
import hudson.Util;
import hudson.util.StreamTaskListener;
import hudson.util.jna.Kernel32;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URL;
import java.util.logging.Level;
import java.util.logging.Logger;
import jenkins.model.Jenkins;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;
/**
* {@link Lifecycle} for Hudson installed as Windows service.
*
* @author Kohsuke Kawaguchi
* @see WindowsInstallerLink
*/
public class WindowsServiceLifecycle extends Lifecycle {
public WindowsServiceLifecycle() {
updateJenkinsExeIfNeeded();
}
/**
* If {@code jenkins.exe} is old compared to our copy,
* schedule an overwrite (except that since it's currently running,
* we can only do it when Jenkins restarts next time.)
*/
private void updateJenkinsExeIfNeeded() {
try {
File baseDir = getBaseDir();
URL exe = getClass().getResource("/windows-service/jenkins.exe");
String ourCopy = Util.getDigestOf(exe.openStream());
for (String name : new String[]{"hudson.exe", "jenkins.exe"}) {
try {
File currentCopy = new File(baseDir, name);
if (!currentCopy.exists()) continue;
String curCopy = new FilePath(currentCopy).digest();
if (ourCopy.equals(curCopy)) continue; // identical
File stage = new File(baseDir, name + ".new");
FileUtils.copyURLToFile(exe, stage);
Kernel32.INSTANCE.MoveFileExA(stage.getAbsolutePath(), currentCopy.getAbsolutePath(), MOVEFILE_DELAY_UNTIL_REBOOT | MOVEFILE_REPLACE_EXISTING);
LOGGER.info("Scheduled a replacement of " + name);
} catch (IOException e) {
LOGGER.log(Level.SEVERE, "Failed to replace " + name, e);
} catch (InterruptedException e) {
}
}
} catch (IOException e) {
LOGGER.log(Level.SEVERE, "Failed to replace jenkins.exe", e);
}
}
/**
* On Windows, jenkins.war is locked, so we place a new version under a special name,
* which is picked up by the service wrapper upon restart.
*/
@Override
public void rewriteHudsonWar(File by) throws IOException {
File dest = getHudsonWar();
// this should be impossible given the canRewriteHudsonWar method,
// but let's be defensive
if (dest == null) throw new IOException("jenkins.war location is not known.");
// backing up the old jenkins.war before it's lost due to upgrading
// unless we are trying to rewrite jenkins.war with a backup itself
File bak = new File(dest.getPath() + ".bak");
if (!by.equals(bak))
FileUtils.copyFile(dest, bak);
String baseName = dest.getName();
baseName = baseName.substring(0, baseName.indexOf('.'));
File baseDir = getBaseDir();
File copyFiles = new File(baseDir, baseName + ".copies");
try (FileWriter w = new FileWriter(copyFiles, true)) {
w.write(by.getAbsolutePath() + '>' + getHudsonWar().getAbsolutePath() + '\n');
}
}
@Override
public void restart() throws IOException, InterruptedException {
Jenkins jenkins = Jenkins.getInstanceOrNull();
try {
if (jenkins != null) {
jenkins.cleanUp();
}
} catch (Throwable e) {
LOGGER.log(Level.SEVERE, "Failed to clean up. Restart will continue.", e);
}
File me = getHudsonWar();
File home = me.getParentFile();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
StreamTaskListener task = new StreamTaskListener(baos);
task.getLogger().println("Restarting a service");
String exe = System.getenv("WINSW_EXECUTABLE");
File executable;
if (exe != null) executable = new File(exe);
else executable = new File(home, "hudson.exe");
if (!executable.exists()) executable = new File(home, "jenkins.exe");
// use restart! to run hudson/jenkins.exe restart in a separate process, so it doesn't kill itself
int r = new LocalLauncher(task).launch().cmds(executable, "restart!")
.stdout(task).pwd(home).join();
if (r != 0)
throw new IOException(baos.toString());
}
private static File getBaseDir() {
File baseDir;
String baseEnv = System.getenv("BASE");
if (baseEnv != null) {
baseDir = new File(baseEnv);
} else {
LOGGER.log(Level.WARNING, "Could not find environment variable 'BASE' for Jenkins base directory. Falling back to JENKINS_HOME");
baseDir = Jenkins.get().getRootDir();
}
return baseDir;
}
private static final Logger LOGGER = Logger.getLogger(WindowsServiceLifecycle.class.getName());
}
|
[
"\"WINSW_EXECUTABLE\"",
"\"BASE\""
] |
[] |
[
"WINSW_EXECUTABLE",
"BASE"
] |
[]
|
["WINSW_EXECUTABLE", "BASE"]
|
java
| 2 | 0 | |
packages/@aws-cdk/aws-eks/lib/kubectl-handler/apply/__init__.py
|
import json
import logging
import os
import subprocess
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# these are coming from the kubectl layer
os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH']
outdir = os.environ.get('TEST_OUTDIR', '/tmp')
kubeconfig = os.path.join(outdir, 'kubeconfig')
def apply_handler(event, context):
logger.info(json.dumps(event))
request_type = event['RequestType']
props = event['ResourceProperties']
# resource properties (all required)
cluster_name = props['ClusterName']
manifest_text = props['Manifest']
role_arn = props['RoleArn']
prune_label = props.get('PruneLabel', None)
overwrite = props.get('Overwrite', 'false').lower() == 'true'
skip_validation = props.get('SkipValidation', 'false').lower() == 'true'
# "log in" to the cluster
cmd = [ 'aws', 'eks', 'update-kubeconfig',
'--role-arn', role_arn,
'--name', cluster_name,
'--kubeconfig', kubeconfig
]
logger.info(f'Running command: {cmd}')
subprocess.check_call(cmd)
if os.path.isfile(kubeconfig):
os.chmod(kubeconfig, 0o600)
# write resource manifests in sequence: { r1 }{ r2 }{ r3 } (this is how
# a stream of JSON objects can be included in a k8s manifest).
manifest_list = json.loads(manifest_text)
manifest_file = os.path.join(outdir, 'manifest.yaml')
with open(manifest_file, "w") as f:
f.writelines(map(lambda obj: json.dumps(obj), manifest_list))
logger.info("manifest written to: %s" % manifest_file)
kubectl_opts = []
if skip_validation:
kubectl_opts.extend(['--validate=false'])
if request_type == 'Create':
# if "overwrite" is enabled, then we use "apply" for CREATE operations
# which technically means we can determine the desired state of an
# existing resource.
if overwrite:
kubectl('apply', manifest_file, *kubectl_opts)
else:
# --save-config will allow us to use "apply" later
kubectl_opts.extend(['--save-config'])
kubectl('create', manifest_file, *kubectl_opts)
elif request_type == 'Update':
if prune_label is not None:
kubectl_opts.extend(['--prune', '-l', prune_label])
kubectl('apply', manifest_file, *kubectl_opts)
elif request_type == "Delete":
try:
kubectl('delete', manifest_file)
except Exception as e:
logger.info("delete error: %s" % e)
def kubectl(verb, file, *opts):
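# Run kubectl against the generated kubeconfig, retrying up to maxAttempts times
# when the output contains 'i/o timeout'; any other failure is raised immediately.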
maxAttempts = 3
retry = maxAttempts
while retry > 0:
try:
cmd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + list(opts)
logger.info(f'Running command: {cmd}')
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
output = exc.output
if b'i/o timeout' in output and retry > 0:
retry = retry - 1
logger.info("kubectl timed out, retries left: %s" % retry)
else:
raise Exception(output)
else:
logger.info(output)
return
raise Exception(f'Operation failed after {maxAttempts} attempts: {output}')
|
[] |
[] |
[
"PATH",
"TEST_OUTDIR"
] |
[]
|
["PATH", "TEST_OUTDIR"]
|
python
| 2 | 0 | |
test/e2e_test.go
|
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build e2e
// +build e2e
package test
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/registry"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/remote"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/sigstore/cosign/cmd/cosign/cli"
"github.com/sigstore/cosign/cmd/cosign/cli/attach"
"github.com/sigstore/cosign/cmd/cosign/cli/download"
"github.com/sigstore/cosign/cmd/cosign/cli/generate"
"github.com/sigstore/cosign/cmd/cosign/cli/options"
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/cmd/cosign/cli/upload"
cliverify "github.com/sigstore/cosign/cmd/cosign/cli/verify"
ociremote "github.com/sigstore/cosign/internal/oci/remote"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/pkg/cosign/kubernetes"
cremote "github.com/sigstore/cosign/pkg/cosign/remote"
"github.com/sigstore/cosign/pkg/sget"
"github.com/sigstore/sigstore/pkg/signature/payload"
)
const (
serverEnv = "REKOR_SERVER"
rekorURL = "https://rekor.sigstore.dev"
)
var keyPass = []byte("hello")
var passFunc = func(_ bool) ([]byte, error) {
return keyPass, nil
}
var verify = func(keyRef, imageRef string, checkClaims bool, annotations map[string]interface{}, attachment string) error {
cmd := cliverify.VerifyCommand{
KeyRef: keyRef,
RekorURL: rekorURL,
CheckClaims: checkClaims,
Annotations: &annotations,
Attachment: attachment,
}
args := []string{imageRef}
return cmd.Exec(context.Background(), args)
}
func TestSignVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOpts{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOpts{}, imgName), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// Sign the image with an annotation
annotations := map[string]interface{}{"foo": "bar"}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, annotations, []string{imgName}, "", true, "", false, false, ""), t)
// It should match this time.
must(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// But two doesn't work
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar", "baz": "bat"}, ""), t)
}
func TestSignVerifyClean(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, _ = mkimage(t, imgName)
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOpts{}, imgName), t)
// Now clean signature from the given image
must(cli.CleanCmd(ctx, options.RegistryOpts{}, imgName), t)
// It doesn't work
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestAttestVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-attest-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
verifyAttestation := cliverify.VerifyAttestationCommand{
KeyRef: pubKeyPath,
}
attestation := "helloworld"
ap := filepath.Join(td, "attestation")
if err := ioutil.WriteFile(ap, []byte(attestation), 0600); err != nil {
t.Fatal(err)
}
mustErr(verifyAttestation.Exec(ctx, []string{imgName}), t)
// Now attest the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(cli.AttestCmd(ctx, ko, options.RegistryOpts{}, imgName, "", true, ap, false, "custom"), t)
// Now verify and download should work!
must(verifyAttestation.Exec(ctx, []string{imgName}), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
}
func TestBundle(t *testing.T) {
// turn on the tlog
defer setenv(t, options.ExperimentalEnv, "1")()
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
// Sign the image
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Make sure verify works
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Make sure offline verification works with bundling
// use rekor prod since we have hardcoded the public key
os.Setenv(serverEnv, "notreal")
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestDuplicateSign(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
ref, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOpts{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOpts{}, imgName), t)
// Signing again should work just fine...
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if len(signatures) > 1 {
t.Errorf("expected there to only be one signature, got %v", signatures)
}
}
func TestKeyURLVerify(t *testing.T) {
// TODO: re-enable once distroless images are being signed by the new client
t.Skip()
// Verify that an image can be verified via key url
keyRef := "https://raw.githubusercontent.com/GoogleContainerTools/distroless/main/cosign.pub"
img := "gcr.io/distroless/base:latest"
must(verify(keyRef, img, true, nil, ""), t)
}
func TestGenerateKeyPairEnvVar(t *testing.T) {
defer setenv(t, "COSIGN_PASSWORD", "foo")()
keys, err := cosign.GenerateKeyPair(generate.GetPass)
if err != nil {
t.Fatal(err)
}
if _, err := cosign.LoadECDSAPrivateKey(keys.PrivateBytes, []byte("foo")); err != nil {
t.Fatal(err)
}
}
func TestGenerateKeyPairK8s(t *testing.T) {
td := t.TempDir()
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
password := "foo"
defer setenv(t, "COSIGN_PASSWORD", password)()
ctx := context.Background()
name := "cosign-secret"
namespace := "default"
if err := kubernetes.KeyPairSecret(ctx, fmt.Sprintf("k8s://%s/%s", namespace, name), generate.GetPass); err != nil {
t.Fatal(err)
}
// make sure the secret actually exists
client, err := kubernetes.Client()
if err != nil {
t.Fatal(err)
}
s, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if v, ok := s.Data["cosign.password"]; !ok || string(v) != password {
t.Fatalf("password is incorrect, got %v expected %v", v, "foo")
}
}
func TestMultipleSignatures(t *testing.T) {
repo, stop := reg(t)
defer stop()
td1 := t.TempDir()
td2 := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, priv1, pub1 := keypair(t, td1)
_, priv2, pub2 := keypair(t, td2)
ctx := context.Background()
// Verify should fail at first for both keys
mustErr(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign the image with one key
ko := sign.KeyOpts{KeyRef: priv1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work with that one, but not the other
must(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign with the other key too
ko.KeyRef = priv2
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work with both
must(verify(pub1, imgName, true, nil, ""), t)
must(verify(pub2, imgName, true, nil, ""), t)
}
func TestSignBlob(t *testing.T) {
var blob = "someblob"
td1 := t.TempDir()
td2 := t.TempDir()
t.Cleanup(func() {
os.RemoveAll(td1)
os.RemoveAll(td2)
})
bp := filepath.Join(td1, blob)
if err := ioutil.WriteFile(bp, []byte(blob), 0644); err != nil {
t.Fatal(err)
}
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
ctx := context.Background()
ko1 := sign.KeyOpts{
KeyRef: pubKeyPath1,
}
ko2 := sign.KeyOpts{
KeyRef: pubKeyPath2,
}
// Verify should fail on a bad input
mustErr(cliverify.VerifyBlobCmd(ctx, ko1, "", "badsig", blob), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", "badsig", blob), t)
// Now sign the blob with one key
ko := sign.KeyOpts{
KeyRef: privKeyPath1,
PassFunc: passFunc,
}
sig, err := sign.SignBlobCmd(ctx, ko, options.RegistryOpts{}, bp, true, "")
if err != nil {
t.Fatal(err)
}
// Now verify should work with that one, but not the other
must(cliverify.VerifyBlobCmd(ctx, ko1, "", string(sig), bp), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", string(sig), bp), t)
}
func TestGenerate(t *testing.T) {
repo, stop := reg(t)
defer stop()
imgName := path.Join(repo, "cosign-e2e")
_, desc, cleanup := mkimage(t, imgName)
defer cleanup()
// Generate the payload for the image, and check the digest.
b := bytes.Buffer{}
must(generate.GenerateCmd(context.Background(), options.RegistryOpts{}, imgName, nil, &b), t)
ss := payload.SimpleContainerImage{}
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
// Now try with some annotations.
b.Reset()
a := map[string]interface{}{"foo": "bar"}
must(generate.GenerateCmd(context.Background(), options.RegistryOpts{}, imgName, a, &b), t)
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
equals(ss.Optional["foo"], "bar", t)
}
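// keypair generates a cosign key pair under td and returns the parsed keys
// along with the private and public key file paths written to disk.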
func keypair(t *testing.T, td string) (*cosign.Keys, string, string) {
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
keys, err := cosign.GenerateKeyPair(passFunc)
if err != nil {
t.Fatal(err)
}
privKeyPath := filepath.Join(td, "cosign.key")
if err := ioutil.WriteFile(privKeyPath, keys.PrivateBytes, 0600); err != nil {
t.Fatal(err)
}
pubKeyPath := filepath.Join(td, "cosign.pub")
if err := ioutil.WriteFile(pubKeyPath, keys.PublicBytes, 0600); err != nil {
t.Fatal(err)
}
return keys, privKeyPath, pubKeyPath
}
func TestUploadDownload(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
testCases := map[string]struct {
signature string
signatureType attach.SignatureArgType
expectedErr bool
}{
"file containing signature": {
signature: "testsignaturefile",
signatureType: attach.FileSignature,
expectedErr: false,
},
"raw signature as argument": {
signature: "testsignatureraw",
signatureType: attach.RawSignature,
expectedErr: false,
},
"empty signature as argument": {
signature: "",
signatureType: attach.RawSignature,
expectedErr: true,
},
}
imgName := path.Join(repo, "cosign-e2e")
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
ref, _, cleanup := mkimage(t, imgName)
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
signature := base64.StdEncoding.EncodeToString([]byte(testCase.signature))
var sigRef string
if testCase.signatureType == attach.FileSignature {
sigRef = mkfile(signature, td, t)
} else {
sigRef = signature
}
// Upload it!
err := attach.SignatureCmd(ctx, options.RegistryOpts{}, sigRef, payloadPath, imgName)
if testCase.expectedErr {
mustErr(err, t)
} else {
must(err, t)
}
// Now download it!
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if testCase.expectedErr {
if len(signatures) != 0 {
t.Fatalf("unexpected signatures %d, wanted 0", len(signatures))
}
} else {
if len(signatures) != 1 {
t.Fatalf("unexpected signatures %d, wanted 1", len(signatures))
}
if b64sig, err := signatures[0].Base64Signature(); err != nil {
t.Fatalf("Base64Signature() = %v", err)
} else if diff := cmp.Diff(b64sig, signature); diff != "" {
t.Error(diff)
}
if p, err := signatures[0].Payload(); err != nil {
t.Fatalf("Payload() = %v", err)
} else if diff := cmp.Diff(p, []byte(payload)); diff != "" {
t.Error(diff)
}
}
// Now delete it!
cleanup()
})
}
}
func TestUploadBlob(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
imgName := path.Join(repo, "/cosign-upload-e2e")
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
// Upload it!
files := []cremote.File{cremote.FileFromFlag(payloadPath)}
must(upload.BlobCmd(ctx, options.RegistryOpts{}, files, "", imgName), t)
// Check it
ref, err := name.ParseReference(imgName)
if err != nil {
t.Fatal(err)
}
// Now download it with sget (this should fail by tag)
if err := sget.New(imgName, "", os.Stdout).Do(ctx); err == nil {
t.Error("expected download to fail")
}
img, err := remote.Image(ref)
if err != nil {
t.Fatal(err)
}
dgst, err := img.Digest()
if err != nil {
t.Fatal(err)
}
result := &bytes.Buffer{}
// But pass by digest
if err := sget.New(imgName+"@"+dgst.String(), "", result).Do(ctx); err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(result)
if err != nil {
t.Fatal(err)
}
if string(b) != payload {
t.Errorf("expected contents to be %s, got %s", payload, string(b))
}
}
func TestAttachSBOM(t *testing.T) {
repo, stop := reg(t)
defer stop()
ctx := context.Background()
imgName := path.Join(repo, "sbom-image")
img, _, cleanup := mkimage(t, imgName)
defer cleanup()
out := bytes.Buffer{}
_, err := download.SBOMCmd(ctx, options.RegistryOpts{}, img.Name(), &out)
if err == nil {
t.Fatal("Expected error")
}
t.Log(out)
out.Reset()
// Upload it!
must(attach.SBOMCmd(ctx, options.RegistryOpts{}, "./testdata/bom-go-mod.spdx", "spdx", imgName), t)
sboms, err := download.SBOMCmd(ctx, options.RegistryOpts{}, imgName, &out)
if err != nil {
t.Fatal(err)
}
t.Log(out)
if len(sboms) != 1 {
t.Fatalf("Expected one sbom, got %d", len(sboms))
}
want, err := ioutil.ReadFile("./testdata/bom-go-mod.spdx")
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(string(want), sboms[0]); diff != "" {
t.Errorf("diff: %s", diff)
}
// Generate key pairs to sign the sbom
td1 := t.TempDir()
td2 := t.TempDir()
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
// Verify should fail on a bad input
mustErr(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
// Now sign the sbom with one key
ko1 := sign.KeyOpts{KeyRef: privKeyPath1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko1, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, "sbom"), t)
// Now verify should work with that one, but not the other
must(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
}
func setenv(t *testing.T, k, v string) func() {
if err := os.Setenv(k, v); err != nil {
t.Fatalf("error setitng env: %v", err)
}
return func() {
os.Unsetenv(k)
}
}
func TestTlog(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now sign the image without the tlog
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// Now verify should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now we turn on the tlog!
defer setenv(t, options.ExperimentalEnv, "1")()
// Verify shouldn't work since we haven't put anything in it yet.
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Sign again with the tlog env var on
must(sign.SignCmd(ctx, ko, options.RegistryOpts{}, nil, []string{imgName}, "", true, "", false, false, ""), t)
// And now verify works!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestGetPublicKeyCustomOut(t *testing.T) {
td := t.TempDir()
keys, privKeyPath, _ := keypair(t, td)
ctx := context.Background()
outFile := "output.pub"
outPath := filepath.Join(td, outFile)
outWriter, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE, 0600)
must(err, t)
pk := cli.Pkopts{
KeyRef: privKeyPath,
}
must(cli.GetPublicKey(ctx, pk, cli.NamedWriter{Name: outPath, Writer: outWriter}, passFunc), t)
output, err := ioutil.ReadFile(outPath)
must(err, t)
equals(keys.PublicBytes, output, t)
}
func mkfile(contents, td string, t *testing.T) string {
f, err := ioutil.TempFile(td, "")
if err != nil {
t.Fatal(err)
}
defer f.Close()
if _, err := f.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
return f.Name()
}
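// mkimage pushes a random image to the registry under the name n and returns
// its reference, the remote descriptor, and a cleanup func that deletes both
// the image and its signature tag.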
func mkimage(t *testing.T, n string) (name.Reference, *remote.Descriptor, func()) {
ref, err := name.ParseReference(n, name.WeakValidation)
if err != nil {
t.Fatal(err)
}
img, err := random.Image(512, 5)
if err != nil {
t.Fatal(err)
}
regClientOpts := registryClientOpts(context.Background())
if err := remote.Write(ref, img, regClientOpts...); err != nil {
t.Fatal(err)
}
remoteImage, err := remote.Get(ref, regClientOpts...)
if err != nil {
t.Fatal(err)
}
cleanup := func() {
_ = remote.Delete(ref, regClientOpts...)
ref, _ := ociremote.SignatureTag(ref.Context().Digest(remoteImage.Descriptor.Digest.String()), ociremote.WithRemoteOptions(regClientOpts...))
_ = remote.Delete(ref, regClientOpts...)
}
return ref, remoteImage, cleanup
}
func must(err error, t *testing.T) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func mustErr(err error, t *testing.T) {
t.Helper()
if err == nil {
t.Fatal("expected error")
}
}
func equals(v1, v2 interface{}, t *testing.T) {
if diff := cmp.Diff(v1, v2); diff != "" {
t.Error(diff)
}
}
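// reg returns the repository to test against: COSIGN_TEST_REPO if set,
// otherwise the host of an in-memory registry started via httptest.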
func reg(t *testing.T) (string, func()) {
repo := os.Getenv("COSIGN_TEST_REPO")
if repo != "" {
return repo, func() {}
}
t.Log("COSIGN_TEST_REPO unset, using fake registry")
r := httptest.NewServer(registry.New())
u, err := url.Parse(r.URL)
if err != nil {
t.Fatal(err)
}
return u.Host, r.Close
}
func registryClientOpts(ctx context.Context) []remote.Option {
return []remote.Option{
remote.WithAuthFromKeychain(authn.DefaultKeychain),
remote.WithContext(ctx),
}
}
|
[
"\"COSIGN_TEST_REPO\""
] |
[] |
[
"COSIGN_TEST_REPO"
] |
[]
|
["COSIGN_TEST_REPO"]
|
go
| 1 | 0 | |
cmd/tv-fe/tv-fe.go
|
package main
import (
"fmt"
"net/http"
"os"
"github.com/Sirupsen/logrus"
"github.com/keitax/tv-fe"
)
func main() {
c := tvfe.GetFromEnv()
if c.RunLevel == tvfe.DevelopmentRunLevel {
logrus.SetLevel(logrus.DebugLevel)
}
app, err := tvfe.NewApplication(c)
if err != nil {
logrus.Fatal(err)
os.Exit(1)
}
port := os.Getenv("PORT")
logrus.Infof("Listen on %s.\n", port)
if err := http.ListenAndServe(fmt.Sprintf(":%s", port), app); err != nil {
logrus.Fatal(err)
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import os
os.environ['FLAGS_enable_parallel_graph'] = str(1)
import paddle.fluid.core as core
import os
from parallel_executor_test_base import TestParallelExecutorBase
from simple_nets import simple_fc_net, init_data
class TestMNIST(TestParallelExecutorBase):
@classmethod
def setUpClass(cls):
os.environ['CPU_NUM'] = str(4)
# simple_fc
def check_simple_fc_convergence(self, use_cuda, use_reduce=False):
if use_cuda and not core.is_compiled_with_cuda():
return
img, label = init_data()
self.check_network_convergence(
simple_fc_net,
feed_dict={"image": img,
"label": label},
use_cuda=use_cuda,
use_reduce=use_reduce)
def test_simple_fc(self):
# use_cuda
self.check_simple_fc_convergence(True)
def check_simple_fc_parallel_accuracy(self, use_cuda):
if use_cuda and not core.is_compiled_with_cuda():
return
img, label = init_data()
single_first_loss, single_last_loss = self.check_network_convergence(
method=simple_fc_net,
feed_dict={"image": img,
"label": label},
use_cuda=use_cuda,
use_parallel_executor=False)
parallel_first_loss, parallel_last_loss = self.check_network_convergence(
method=simple_fc_net,
feed_dict={"image": img,
"label": label},
use_cuda=use_cuda,
use_parallel_executor=True)
self.assertAlmostEquals(
np.mean(parallel_first_loss),
single_first_loss,
delta=1e-6, )
self.assertAlmostEquals(
np.mean(parallel_last_loss), single_last_loss, delta=1e-6)
def test_simple_fc_parallel_accuracy(self):
self.check_simple_fc_parallel_accuracy(True)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"CPU_NUM",
"FLAGS_enable_parallel_graph"
] |
[]
|
["CPU_NUM", "FLAGS_enable_parallel_graph"]
|
python
| 2 | 0 | |
common/python/mongo_db.py
|
import logging
import pymongo
import os
CONNECT_TIMEOUT = 2000
log = logging.getLogger('app')
env = os.environ
host = env.get('MONGO_HOST')
port = int(env.get('MONGO_PORT', 27017))
login = env.get('MONGO_LOGIN')
password = env.get('MONGO_PASSWORD')
_client = None
def get_client():
global _client
if not host:
return
if _client:
return _client
log.info('Connecting to MongoDB: %s:%s', host, port)
_client = pymongo.MongoClient(
host, port,
connect=True,
connectTimeoutMS=CONNECT_TIMEOUT,
username=login,
password=password,
)
return _client
def close():
global _client
if _client:
log.info('Closing connection to MongoDB: %s:%s', host, port)
_client.close()
_client = None
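# Illustrative usage (assumes MONGO_HOST and related variables are set in the environment):
#   client = get_client()
#   if client is not None:
#       db = client["mydb"]
#   close()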
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/tvm/target/target.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target data structure."""
import json
import os
import re
import warnings
import tvm._ffi
from tvm._ffi import register_func as _register_func
from tvm.runtime import Object, convert
from tvm.runtime.container import String
from tvm.ir.container import Map
from . import _ffi_api
@tvm._ffi.register_object
class TargetKind(Object):
"""Kind of a compilation target"""
@property
def options(self):
"""Returns the dict of available option names and types"""
return dict(_ffi_api.ListTargetKindOptions(self))
@staticmethod
def options_from_name(kind_name: str):
"""Returns the dict of available option names and types from a name of TargetKind"""
return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name))
@tvm._ffi.register_object
class Target(Object):
"""Target device information, use through TVM API.
Note
----
You can create target using the constructor or the following functions
- :py:func:`tvm.target.arm_cpu` create arm_cpu target
- :py:func:`tvm.target.cuda` create CUDA target
- :py:func:`tvm.target.rocm` create ROCM target
- :py:func:`tvm.target.mali` create Mali target
- :py:func:`tvm.target.intel_graphics` create Intel Graphics target
"""
def __init__(self, target, host=None):
"""Construct a TVM target object from
1) Raw target string
2) Target config dict
3) Target tag
Parameters
----------
target : Union[str, Dict[str, Any]]
Can be one of a literal target string, a json string describing
a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the
possible values are:
kind : str (required)
Which codegen path to use, for example 'llvm' or 'cuda'.
keys : List of str (optional)
A set of strategies that can be dispatched to. When using
"kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
device : str (optional)
A single key that corresponds to the actual device being run on.
This will be effectively appended to the keys.
libs : List of str (optional)
The set of external libraries to use. For example ['cblas', 'mkl'].
system-lib : bool (optional)
If True, build a module that contains self registered functions.
Useful for environments where dynamic loading like dlopen is banned.
mcpu : str (optional)
The specific cpu being run on. Serves only as an annotation.
model : str (optional)
An annotation indicating what model a workload came from.
runtime : str (optional)
An annotation indicating which runtime to use with a workload.
mtriple : str (optional)
The llvm triplet describing the target, for example "arm64-linux-android".
mattr : List of str (optional)
The llvm features to compile with, for example ["+avx512f", "+mmx"].
mfloat-abi : str (optional)
An llvm setting that is one of 'hard' or 'soft' indicating whether to use
hardware or software floating-point operations.
mabi : str (optional)
An llvm setting. Generate code for the specified ABI, for example "lp64d".
host : Union[str, Dict[str, Any]] (optional)
Description for target host. Can be recursive. Similar to target.
host : Optional[Union[str, Dict[str, Any]]]
Similar to target but for target host. Can be one of a literal target host string,
a json string describing a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the possible values are
same as target.
"""
if isinstance(target, (dict, str)):
target = convert(target)
if isinstance(host, (dict, str)):
host = convert(host)
if target is None or not isinstance(target, (Map, String, Target)):
raise ValueError("target has to be a string or dictionary.")
if host is not None:
if not isinstance(host, (Map, String, Target)):
raise ValueError("target host has to be a string or dictionary.")
self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))
else:
self.__init_handle_by_constructor__(_ffi_api.Target, target)
def __enter__(self):
_ffi_api.TargetEnterScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.TargetExitScope(self)
def export(self):
return _ffi_api.TargetExport(self)
def with_host(self, host=None):
return _ffi_api.WithHost(self, Target(host))
@staticmethod
def current(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether allow the current target to be none
Raises
------
ValueError if current target is not set.
"""
return _ffi_api.TargetCurrent(allow_none)
@property
def arch(self):
"""Returns the cuda arch from the target if it exists."""
return str(self.attrs.get("arch", ""))
@property
def max_num_threads(self):
"""Returns the max_num_threads from the target if it exists."""
return int(self.attrs["max_num_threads"])
@property
def thread_warp_size(self):
"""Returns the thread_warp_size from the target if it exists."""
return int(self.attrs["thread_warp_size"])
@property
def max_function_args(self):
return int(self.attrs.get("max_function_args", -1))
@property
def device_name(self):
return str(self.attrs.get("device", ""))
@property
def model(self):
"""Returns model from the target if it exists."""
return str(self.attrs.get("model", "unknown"))
@property
def mcpu(self):
"""Returns the mcpu from the target if it exists."""
return str(self.attrs.get("mcpu", ""))
@property
def mattr(self):
"""Returns the mattr from the target if it exists."""
return list(self.attrs.get("mattr", []))
@property
def supports_integer_dot_product(self):
if self.attrs.get("supports_integer_dot_product", []):
return bool(self.attrs["supports_integer_dot_product"])
return False
@property
def libs(self):
return list(self.attrs.get("libs", []))
def get_kind_attr(self, attr_name):
"""Get additional attribute about the target kind.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : object
The attribute value
"""
return _ffi_api.TargetKindGetAttr(self.kind, attr_name)
@staticmethod
def list_kinds():
"""Returns the list of available target names."""
return list(_ffi_api.ListTargetKinds())
@staticmethod
def check_and_update_host_consist(target, host=None, target_is_dict_key=True):
"""A helper function that merges a legacy "target, target_host" pair, then returns
the merged target and its host field. The function is for legacy target and target
host pair only, and should not be used in the new target system.
Parameters
----------
target : Union[str, Dict[str, Any], Target]
The target or heterogeneous target
host : Union[str, Dict[str, Any], Target, None]
The target host
target_is_dict_key : Bool
When the type of target is dict, whether Target is the key (Otherwise the value)
"""
if isinstance(target, (dict, str)):
target = convert(target)
if isinstance(host, (dict, str)):
host = convert(host)
if target is None:
assert host is None, "Target host is not empty when target is empty."
return target, host
if isinstance(target, Map) and "kind" not in target:
new_target = {}
for tgt, mod in target.items():
if not target_is_dict_key:
tgt, mod = mod, tgt
if isinstance(tgt, (Map, String, Target)):
tgt, host = Target.check_and_update_host_consist(tgt, host)
if not target_is_dict_key:
tgt, mod = mod, tgt
new_target[tgt] = mod
target = new_target
else:
target = Target(target, host)
host = target.host
return target, host
# TODO(@tvm-team): Deprecate the helper functions below. Encourage the usage of config dict instead.
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
def cuda(model="unknown", arch=None, options=None):
"""Returns a cuda target.
Parameters
----------
model: str
The model of cuda device (e.g. 1080ti)
arch: str
The cuda architecture (e.g. sm_61)
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
if arch:
opts = _merge_opts(["-arch=%s" % arch], opts)
if not any(["-arch" in opt for opt in opts]):
warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.")
return Target(" ".join(["cuda"] + opts))
def rocm(model="unknown", options=None):
"""Returns a ROCM target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return Target(" ".join(["rocm"] + opts))
def mali(model="unknown", options=None):
"""Returns a ARM Mali GPU target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=mali", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def intel_graphics(model="unknown", options=None):
"""Returns an Intel Graphics target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
MICRO_SUPPORTED_MODELS = {
"host": [],
"atsamd51": ["-mcpu=cortex-m4"],
"cxd5602gg": ["-mcpu=cortex-m4"],
"esp32": [],
"imxrt10xx": ["-mcpu=cortex-m7"],
"mps2_an521": ["-mcpu=cortex-m33"],
"mps3_an547": ["-mcpu=cortex-m55"],
"nrf52840": ["-mcpu=cortex-m4"],
"nrf5340dk": ["-mcpu=cortex-m33"],
"sam3x8e": ["-mcpu=cortex-m3"],
"stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32l4r5zi": ["-mcpu=cortex-m4"],
"stm32u5xx": ["-mcpu=cortex-m33"],
"zynq_mp_r5": ["-mcpu=cortex-r5"],
}
def micro(model="unknown", options=None):
"""Returns a microTVM target.
Parameters
----------
model : str
Canonically identifies the target device. This is typically a device board level name.
The allowed values are MICRO_SUPPORTED_MODELS.keys().
options : str or list of str
Additional options
"""
if model not in MICRO_SUPPORTED_MODELS:
raise ValueError(f"Model {model} not supported by tvm.target.micro.")
opts = _merge_opts(
MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"],
options,
)
# NOTE: in the future, the default micro target will be LLVM except when
# external dependencies are present.
return Target(" ".join(["c"] + opts))
def arm_cpu(model="unknown", options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
Parameters
----------
model: str
SoC name or phone name of the arm board.
options : str or list of str
Additional options
"""
trans_table = {
"pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"],
"rasp4b": [
"-model=bcm2711",
"-mtriple=armv8l-linux-gnueabihf",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rasp4b64": [
"-model=bcm2711",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"],
"ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"beagleai": [
"-model=beagleai",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a15",
],
"stm32mp1": [
"-model=stm32mp1",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a7",
],
"thunderx": [
"-model=thunderx",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon,+crc,+lse",
"-mcpu=thunderxt88",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def rasp(options=None):
"""Return a Raspberry 3b target.
Parameters
----------
options : str or list of str
Additional options
"""
warnings.warn(
"tvm.target.rasp() is going to be deprecated. " 'Please use tvm.target.arm_cpu("rasp3b")'
)
return arm_cpu("rasp3b", options)
def vta(model="unknown", options=None):
opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["ext_dev"] + opts))
def bifrost(model="unknown", options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
options : str or list of str
Additional options
"""
opts = ["-device=bifrost", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def riscv_cpu(model="sifive-u54", options=None):
"""Returns a RISC-V CPU target.
Default: sifive-u54 rv64gc
Parameters
----------
model: str
CPU name.
options : str or list of str
Additional options
"""
trans_table = {
"sifive-e31": [
"-model=sifive-e31",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e31",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imac -mabi=ilp32 -mcpu=sifive-e31
],
"sifive-e76": [
"-model=sifive-e76",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e76",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imafc -mabi=ilp32 -mcpu=sifive-e76
],
"sifive-u54": [
"-model=sifive-u54",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u54",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u54
],
"sifive-u74": [
"-model=sifive-u74",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u74",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u74
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def hexagon(cpu_ver="v66", **kwargs):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str (default: "v66")
CPU version used for code generation. Not all allowed cpu str
will be valid, LLVM will throw an error.
Recognized keyword parameters
-----------------------------
hvx : int (default: 128)
Size of HVX vector in bytes. Value of 0 disables HVX codegen.
sim_options : str or list of str (default: None)
User defined sim arguments. CPU version defaults to cpu_ver.
Otherwise, separate versions are used for codegen and sim. Not
all allowed cpu strings will be valid, simulator will throw an
error if invalid. Does not affect codegen.
llvm_options : str or list of str (default: None)
User defined compiler arguments.
use_qfloat : bool (default: True for cpu_ver >= v68, False otherwise)
Whether to use QFloat HVX instructions.
use_ieee_fp : bool (default: False)
Whether to use IEEE HVX instructions
link_params : bool (default: False)
Whether to link graph parameters into the LLVM module.
Note: Floating point support in HVX requires LLVM 14+.
"""
# Some of the target parameters correspond to target kind attributes
# listed in src/target/target_kind.cc. For those parameters, their
# names follow the attribute names with the exception of '_' being used
# in place of '-'.
# Example compiler arguments
# llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
def get_arch_version(cpu_ver):
m = re.match(r"v([0-9]+).*", cpu_ver)
assert m
return int(m.group(1))
# Check for valid codegen cpu
valid_hex = ["v65", "v66", "v67", "v67t", "v68", "v69"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
except:
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
# Target configuration:
arch_version = get_arch_version(cpu_ver)
config = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
"use_qfloat": arch_version >= 68,
"use_ieee_fp": False,
"link_params": False,
}
config.update(kwargs)
# Warn about obsolete parameter names.
if config.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"sim_options": config["sim_args"]})
if config.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"llvm_options": config["llvm_args"]})
# LLVM target string
def create_llvm_target(cpu_ver, config):
"""Create LLVM target string."""
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
# Process the options that affect target features and return the
# target feature string.
def create_target_features(config):
features = {
"use_qfloat": "hvx-qfloat",
"use_ieee_fp": "hvx-ieee-fp",
}
tfs = []
if config["hvx"] > 0:
valid_hvx = [0, 64, 128]
if not config["hvx"] in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"]
else:
tfs += ["-hvx"]
# All the additional features happen to only apply to v68+.
# Don't bother applying them (even with '-') to lower versions.
if arch_version >= 68:
tfs += ["-+"[config[f]] + features[f] for f in features]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(config)
# Simulator options string
def create_sim_options(cpu_ver, config):
"""Create simulator option string."""
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
# Set the stacklevel to the tvm.target.hexagon() call.
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = config["hvx"]
sim_options = config["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
# Add user defined args
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
# Check for supplied sim cpu version
if "v6" in sim_options:
sim_cpu = ""
# Regex match for allowed cpus
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
# LLVM options string
def create_llvm_options(cpu_ver, config): # pylint: disable=unused-argument
"""Create LLVM options string."""
llvm_options = config["llvm_options"]
# TVM's option parser doesn't allow '=' in values, but '=' can
# appear in LLVM flags. Replace it with '@', since it's unlikely
# that '@' will be used in another context.
if llvm_options is None or len(llvm_options.strip()) == 0:
return ""
args = [s.replace("=", "@") for s in llvm_options.split()]
return "--llvm-options=" + ",".join(args)
# TVM target attributes string
def create_tvm_options(cpu_ver, config): # pylint: disable=unused-argument
"""Create TVM target features string."""
features = {
"link_params": "link-params",
}
opts = ""
for k in config:
if k in features:
opts += " --" + features[k] + "=" + str(config[k])
return opts
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, config)
target_str = create_llvm_target(cpu_ver, config)
llvm_str = create_llvm_options(cpu_ver, config)
tvm_str = create_tvm_options(cpu_ver, config)
args_list = target_str.split() + llvm_str.split() + tvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
STM32_SUPPORTED_SERIES = {
# High-Performance
"stm32H7xx": ["-device=arm_cpu", "-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32F7xx": ["-device=arm_cpu", "-mcpu=cortex-m7"],
"stm32F4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32F2xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
# Mainstream
"stm32G0xx": ["-device=arm_cpu", "-mcpu=cortex-m0+"],
"stm32F0xx": ["-device=arm_cpu", "-mcpu=cortex-m0"],
"stm32F1xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
"stm32G4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32F3xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
# Low-power
"stm32U5xx": ["-device=arm_cpu", "-mcpu=cortex-m33"],
"stm32L5xx": ["-device=arm_cpu", "-mcpu=cortex-m33"],
"stm32L4xx": ["-device=arm_cpu", "-mcpu=cortex-m4"],
"stm32L1xx": ["-device=arm_cpu", "-mcpu=cortex-m3"],
"stm32L0xx": ["-device=arm_cpu", "-mcpu=cortex-m0+"],
}
def stm32(series="unknown", options=None):
"""Returns a STM32 target.
Parameters
----------
series: str
Series name of a STM32 board series, eg. stm32H7xx or stm32F4xx
options : str or list of str
Additional options
"""
if series not in STM32_SUPPORTED_SERIES:
raise ValueError(f"Series {series} is not supported by tvm.target.stm32.")
opts = _merge_opts(STM32_SUPPORTED_SERIES[series], options)
return Target(" ".join(["c"] + opts))
def create(target):
"""Deprecated. Use the constructor of :py:mod:`tvm.target.Target` directly."""
warnings.warn("tvm.target.create() is being deprecated. Please use tvm.target.Target() instead")
return Target(target)
@_register_func("target._load_config_dict")
def _load_config_dict(config_dict_str):
try:
config = json.loads(config_dict_str)
except json.decoder.JSONDecodeError:
return None
if not isinstance(config, dict):
return None
for key in config.keys():
if not isinstance(key, str):
return None
return config
|
[] |
[] |
[
"HEXAGON_SIM_ARGS"
] |
[]
|
["HEXAGON_SIM_ARGS"]
|
python
| 1 | 0 | |
internal/tracer/tracer.go
|
package tracer
import (
"fmt"
"io"
"os"
"reflect"
"strconv"
"sync"
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
jaegercfg "github.com/uber/jaeger-client-go/config"
jaegermetrics "github.com/uber/jaeger-lib/metrics"
"go.uber.org/automaxprocs/maxprocs"
ddopentracing "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/trace/ot"
"github.com/sourcegraph/sourcegraph/internal/version"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func init() {
// Tune GOMAXPROCS for kubernetes. All our binaries import this package,
// so we tune for all of them.
//
// TODO it is surprising that we do this here. We should create a standard
// import for sourcegraph binaries which would have less surprising
// behaviour.
if _, err := maxprocs.Set(); err != nil {
log15.Error("automaxprocs failed", "error", err)
}
if r := os.Getenv("MUX_ANALYTICS_TRACE_RATE"); r != "" {
rate, err := strconv.ParseFloat(r, 64)
if err != nil {
log15.Error("Failed to parse MUX_ANALYTICS_TRACE_RATE", "error", err)
return
}
MUX_ANALYTICS_TRACE_RATE = rate
}
}
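// MUX_ANALYTICS_TRACE_RATE is the default sampling rate for mux analytics
// traces; init above allows overriding it via the MUX_ANALYTICS_TRACE_RATE
// environment variable.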
var MUX_ANALYTICS_TRACE_RATE = 0.1
// options control the behavior of a TracerType
type options struct {
TracerType
externalURL string
debug bool
// these values are not configurable by site config
serviceName string
version string
env string
}
type TracerType string
const (
None TracerType = "none"
Datadog TracerType = "datadog"
Ot TracerType = "opentracing"
)
// isSetByUser returns true if the TracerType is one supported by the schema
// should be kept in sync with ObservabilityTracing.Type in schema/site.schema.json
func (t TracerType) isSetByUser() bool {
switch t {
case Datadog, Ot:
return true
}
return false
}
// Init should be called from the main function of service
func Init(c conftypes.WatchableSiteConfig) {
opts := &options{}
opts.serviceName = env.MyName
if version.IsDev(version.Version()) {
opts.env = "dev"
}
if d := os.Getenv("DD_ENV"); d != "" {
opts.env = d
}
opts.version = version.Version()
initTracer(opts, c)
}
// initTracer is a helper that should be called exactly once (from Init).
func initTracer(opts *options, c conftypes.WatchableSiteConfig) {
globalTracer := newSwitchableTracer()
opentracing.SetGlobalTracer(globalTracer)
// initial tracks if it's our first run of conf.Watch. This is used to
// prevent logging "changes" when it's the first run.
initial := true
// Initially everything is disabled since we haven't read conf yet.
oldOpts := options{
serviceName: opts.serviceName,
version: opts.version,
env: opts.env,
// the values below may change
TracerType: None,
debug: false,
externalURL: "",
}
// Watch loop
go c.Watch(func() {
siteConfig := c.SiteConfig()
samplingStrategy := ot.TraceNone
shouldLog := false
setTracer := None
if tracingConfig := siteConfig.ObservabilityTracing; tracingConfig != nil {
switch tracingConfig.Sampling {
case "all":
samplingStrategy = ot.TraceAll
setTracer = Ot
case "selective":
samplingStrategy = ot.TraceSelective
setTracer = Ot
}
if t := TracerType(tracingConfig.Type); t.isSetByUser() {
setTracer = t
}
shouldLog = tracingConfig.Debug
}
if tracePolicy := ot.GetTracePolicy(); tracePolicy != samplingStrategy && !initial {
log15.Info("opentracing: TracePolicy", "oldValue", tracePolicy, "newValue", samplingStrategy)
}
initial = false
ot.SetTracePolicy(samplingStrategy)
opts := options{
externalURL: siteConfig.ExternalURL,
TracerType: setTracer,
debug: shouldLog,
serviceName: opts.serviceName,
version: opts.version,
env: opts.env,
}
if opts == oldOpts {
// Nothing changed
return
}
prevTracer := oldOpts.TracerType
oldOpts = opts
t, closer, err := newTracer(&opts, prevTracer)
if err != nil {
log15.Warn("Could not initialize tracer", "tracer", opts.TracerType, "error", err.Error())
return
}
globalTracer.set(t, closer, opts.debug)
})
}
// TODO Use openTelemetry https://github.com/sourcegraph/sourcegraph/issues/27386
func newTracer(opts *options, prevTracer TracerType) (opentracing.Tracer, io.Closer, error) {
if opts.TracerType == None {
log15.Info("tracing disabled")
if prevTracer == Datadog {
ddtracer.Stop()
}
return opentracing.NoopTracer{}, nil, nil
}
if opts.TracerType == Datadog {
log15.Info("Datadog: tracing enabled")
tracer := ddopentracing.New(ddtracer.WithService(opts.serviceName),
ddtracer.WithDebugMode(opts.debug),
ddtracer.WithServiceVersion(opts.version), ddtracer.WithEnv(opts.env))
return tracer, nil, nil
}
if prevTracer == Datadog {
ddtracer.Stop()
}
log15.Info("opentracing: enabled")
cfg, err := jaegercfg.FromEnv()
if err != nil {
return nil, nil, errors.Wrap(err, "jaegercfg.FromEnv failed")
}
cfg.ServiceName = opts.serviceName
cfg.Tags = append(cfg.Tags, opentracing.Tag{Key: "service.version", Value: opts.version}, opentracing.Tag{Key: "service.env", Value: opts.env})
if reflect.DeepEqual(cfg.Sampler, &jaegercfg.SamplerConfig{}) {
// Default sampler configuration for when it is not specified via
// JAEGER_SAMPLER_* env vars. In most cases, this is sufficient
// enough to connect Sourcegraph to Jaeger without any env vars.
cfg.Sampler.Type = jaeger.SamplerTypeConst
cfg.Sampler.Param = 1
}
tracer, closer, err := cfg.NewTracer(
jaegercfg.Logger(log15Logger{}),
jaegercfg.Metrics(jaegermetrics.NullFactory),
)
if err != nil {
return nil, nil, errors.Wrap(err, "jaegercfg.NewTracer failed")
}
return tracer, closer, nil
}
type log15Logger struct{}
func (l log15Logger) Error(msg string) { log15.Error(msg) }
func (l log15Logger) Infof(msg string, args ...any) {
log15.Info(fmt.Sprintf(msg, args...))
}
// move to OpenTelemetry https://github.com/sourcegraph/sourcegraph/issues/27386
// switchableTracer implements opentracing.Tracer. The underlying opentracer used is switchable (set via
// the `set` method).
type switchableTracer struct {
mu sync.RWMutex
opentracer opentracing.Tracer
tracerCloser io.Closer
log bool
}
// move to OpenTelemetry https://github.com/sourcegraph/sourcegraph/issues/27386
func newSwitchableTracer() *switchableTracer {
return &switchableTracer{opentracer: opentracing.NoopTracer{}}
}
func (t *switchableTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
t.mu.RLock()
defer t.mu.RUnlock()
if t.log {
log15.Info("opentracing: StartSpan", "operationName", operationName, "opentracer", fmt.Sprintf("%T", t.opentracer))
}
return t.opentracer.StartSpan(operationName, opts...)
}
func (t *switchableTracer) Inject(sm opentracing.SpanContext, format any, carrier any) error {
t.mu.RLock()
defer t.mu.RUnlock()
if t.log {
log15.Info("opentracing: Inject", "opentracer", fmt.Sprintf("%T", t.opentracer))
}
return t.opentracer.Inject(sm, format, carrier)
}
func (t *switchableTracer) Extract(format any, carrier any) (opentracing.SpanContext, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.log {
log15.Info("opentracing: Extract", "tracer", fmt.Sprintf("%T", t.opentracer))
}
return t.opentracer.Extract(format, carrier)
}
func (t *switchableTracer) set(tracer opentracing.Tracer, tracerCloser io.Closer, log bool) {
t.mu.Lock()
defer t.mu.Unlock()
if tc := t.tracerCloser; tc != nil {
// Close the old tracerCloser outside the critical zone
go tc.Close()
}
t.tracerCloser = tracerCloser
t.opentracer = tracer
t.log = log
}
|
[
"\"MUX_ANALYTICS_TRACE_RATE\"",
"\"DD_ENV\""
] |
[] |
[
"MUX_ANALYTICS_TRACE_RATE",
"DD_ENV"
] |
[]
|
["MUX_ANALYTICS_TRACE_RATE", "DD_ENV"]
|
go
| 2 | 0 | |
app.go
|
package main
import (
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/kjk/notionapi"
)
const version = "1.0.0"
type App struct {
client *notionapi.Client
pageID string
exportType string
exportDir string
}
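// main reads its configuration from the environment: NOTION_TOKEN and
// NOTION_PAGEID are required, NOTION_EXPORTTYPE and NOTION_EXPORTDIR are
// optional (defaulting to "markdown" and the working directory).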
func main() {
authToken := os.Getenv("NOTION_TOKEN")
pageID := os.Getenv("NOTION_PAGEID")
if authToken == "" || pageID == "" {
log.Fatalln("You have to set the env vars NOTION_TOKEN and NOTION_PAGEID.")
}
app := &App{
client: &notionapi.Client{
AuthToken: authToken,
},
pageID: pageID,
exportType: os.Getenv("NOTION_EXPORTTYPE"),
}
var err error
app.exportDir = os.Getenv("NOTION_EXPORTDIR")
if app.exportDir == "" {
app.exportDir, err = os.Getwd()
if err != nil {
log.Fatal(err)
}
}
log.Printf("notionbackup (v%s) | Starting the export process ...\n", version)
startTime := time.Now()
exportURL, err := app.exportPageURL(true)
if err != nil {
log.Fatal(err)
}
log.Printf("Notion export successful. Starting to download the exported .zip file now ...\n")
bytesWritten := app.saveToFile(exportURL)
log.Printf("Notion export (page id: %s) took %s, %d bytes written.\n", app.pageID, time.Since(startTime).String(), bytesWritten)
}
func (app *App) exportPageURL(recursive bool) (string, error) {
if app.exportType == "" {
app.exportType = "markdown"
}
// support full url, like https://www.notion.so/username/PageName-abcdefghia1f4505762g63874a1e97yz
if strings.HasPrefix(app.pageID, "https://") {
app.pageID = notionapi.ExtractNoDashIDFromNotionURL(app.pageID)
}
return app.client.RequestPageExportURL(app.pageID, app.exportType, recursive)
}
func (app *App) saveToFile(exportURL string) int64 {
fileName := fmt.Sprintf("%s_%s.zip", time.Now().Format("20060102150405"), app.pageID)
if err := os.MkdirAll(app.exportDir, 0755); err != nil {
log.Fatal(err)
}
sep := string(os.PathSeparator)
if strings.HasSuffix(app.exportDir, sep) {
sep = ""
}
path := strings.Join([]string{app.exportDir, fileName}, sep)
file, err := os.Create(path)
if err != nil {
log.Fatalln(err)
}
defer file.Close()
resp, err := http.Get(exportURL)
if err != nil {
log.Fatalln(err)
}
defer resp.Body.Close()
bytesWritten, err := io.Copy(file, resp.Body)
if err != nil {
log.Fatalln(err)
}
return bytesWritten
}
|
[
"\"NOTION_TOKEN\"",
"\"NOTION_PAGEID\"",
"\"NOTION_EXPORTTYPE\"",
"\"NOTION_EXPORTDIR\""
] |
[] |
[
"NOTION_EXPORTTYPE",
"NOTION_TOKEN",
"NOTION_EXPORTDIR",
"NOTION_PAGEID"
] |
[]
|
["NOTION_EXPORTTYPE", "NOTION_TOKEN", "NOTION_EXPORTDIR", "NOTION_PAGEID"]
|
go
| 4 | 0 | |
core/chaincode/platforms/golang/hash.go
|
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package golang
import (
"archive/tar"
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/op/go-logging"
"github.com/spf13/viper"
cutil "github.com/hyperledger/fabric/core/container/util"
"github.com/hyperledger/fabric/core/util"
pb "github.com/hyperledger/fabric/protos/peer"
)
var logger = logging.MustGetLogger("golang/hash")
//core hash computation factored out for testing
func computeHash(contents []byte, hash []byte) []byte {
newSlice := make([]byte, len(hash)+len(contents))
//copy the contents
copy(newSlice[0:len(contents)], contents[:])
//add the previous hash
copy(newSlice[len(contents):], hash[:])
//compute new hash
hash = util.ComputeCryptoHash(newSlice)
return hash
}
//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
//Directory entries are traversed recursively. In the end a single
//hash value is returned for the entire directory structure
func hashFilesInDir(rootDir string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
currentDir := filepath.Join(rootDir, dir)
logger.Debugf("hashFiles %s", currentDir)
//ReadDir returns sorted list of files in dir
fis, err := ioutil.ReadDir(currentDir)
if err != nil {
return hash, fmt.Errorf("ReadDir failed %s\n", err)
}
for _, fi := range fis {
name := filepath.Join(dir, fi.Name())
if fi.IsDir() {
var err error
hash, err = hashFilesInDir(rootDir, name, hash, tw)
if err != nil {
return hash, err
}
continue
}
fqp := filepath.Join(rootDir, name)
buf, err := ioutil.ReadFile(fqp)
if err != nil {
fmt.Printf("Error reading %s\n", err)
return hash, err
}
//get the new hash from file contents
hash = computeHash(buf, hash)
if tw != nil {
is := bytes.NewReader(buf)
if err = cutil.WriteStreamToPackage(is, fqp, filepath.Join("src", name), tw); err != nil {
return hash, fmt.Errorf("Error adding file to tar %s", err)
}
}
}
return hash, nil
}
func isCodeExist(tmppath string) error {
file, err := os.Open(tmppath)
if err != nil {
return fmt.Errorf("Download failed %s", err)
}
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Could not stat file %s", err)
}
if !fi.IsDir() {
return fmt.Errorf("File %s is not dir\n", file.Name())
}
return nil
}
func getCodeFromHTTP(path string) (codegopath string, err error) {
codegopath = ""
err = nil
logger.Debugf("getCodeFromHTTP %s", path)
// The following could be done with os.Getenv("GOPATH") but we need to change it later so this prepares for that next step
env := os.Environ()
var origgopath string
var gopathenvIndex int
for i, v := range env {
if strings.Index(v, "GOPATH=") == 0 {
p := strings.SplitAfter(v, "GOPATH=")
origgopath = p[1]
gopathenvIndex = i
break
}
}
if origgopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
gopath := filepath.SplitList(origgopath)[0]
// Define a new gopath in which to download the code
newgopath := filepath.Join(gopath, "_usercode_")
//ignore errors.. _usercode_ might exist. TempDir will catch any other errors
os.Mkdir(newgopath, 0755)
if codegopath, err = ioutil.TempDir(newgopath, ""); err != nil {
err = fmt.Errorf("could not create tmp dir under %s(%s)", newgopath, err)
return
}
//go paths can have multiple dirs. We create a GOPATH with two source tree's as follows
//
// <temporary empty folder to download chaincode source> : <local go path with OBC source>
//
//This approach has several goodness:
// . Go will pick the first path to download user code (which we will delete after processing)
// . GO will not download OBC as it is in the second path. GO will use the local OBC for generating chaincode image
// . network savings
// . more secure
// . as we are not downloading OBC, private, password-protected OBC repo's become non-issue
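// e.g. (illustrative) GOPATH="/tmp/_usercode_/abc123:/opt/gopath", where the
// first entry is the temporary download dir and the second the original GOPATH.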
env[gopathenvIndex] = "GOPATH=" + codegopath + string(os.PathListSeparator) + origgopath
// Use a 'go get' command to pull the chaincode from the given repo
logger.Debugf("go get %s", path)
cmd := exec.Command("go", "get", path)
cmd.Env = env
var out bytes.Buffer
cmd.Stdout = &out
var errBuf bytes.Buffer
cmd.Stderr = &errBuf //capture Stderr and print it on error
err = cmd.Start()
// Create a go routine that will wait for the command to finish
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(time.Duration(viper.GetInt("chaincode.deploytimeout")) * time.Millisecond):
// If pulling repos takes too long, we should give up
// (This can happen if a repo is private and the git clone asks for credentials)
if err = cmd.Process.Kill(); err != nil {
err = fmt.Errorf("failed to kill: %s", err)
} else {
err = errors.New("Getting chaincode took too long")
}
case err = <-done:
// If we're here, the 'go get' command must have finished
if err != nil {
err = fmt.Errorf("'go get' failed with error: \"%s\"\n%s", err, string(errBuf.Bytes()))
}
}
return
}
func getCodeFromFS(path string) (codegopath string, err error) {
logger.Debugf("getCodeFromFS %s", path)
gopath := os.Getenv("GOPATH")
if gopath == "" {
err = fmt.Errorf("GOPATH not defined")
return
}
// Only take the first element of GOPATH
codegopath = filepath.SplitList(gopath)[0]
return
}
//collectChaincodeFiles collects chaincode files and generates hashcode for the
//package. If path is a HTTP(s) url it downloads the code first.
//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
//by the user is equivalent to the path. This method will treat the name
//as codebytes and compute the hash from it. ie, user cannot run the chaincode
//with the same (name, ctor, args)
func collectChaincodeFiles(spec *pb.ChaincodeSpec, tw *tar.Writer) (string, error) {
if spec == nil {
return "", fmt.Errorf("Cannot collect files from nil spec")
}
chaincodeID := spec.ChaincodeID
if chaincodeID == nil || chaincodeID.Path == "" {
return "", fmt.Errorf("Cannot collect files from empty chaincode path")
}
ctor := spec.CtorMsg
if ctor == nil || len(ctor.Args) == 0 {
return "", fmt.Errorf("Cannot collect files from empty ctor")
}
//code root will point to the directory where the code exists
//in the case of http it will be a temporary dir that
//will have to be deleted
var codegopath string
var ishttp bool
defer func() {
if ishttp && codegopath != "" {
os.RemoveAll(codegopath)
}
}()
path := chaincodeID.Path
var err error
var actualcodepath string
if strings.HasPrefix(path, "http://") {
ishttp = true
actualcodepath = path[7:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else if strings.HasPrefix(path, "https://") {
ishttp = true
actualcodepath = path[8:]
codegopath, err = getCodeFromHTTP(actualcodepath)
} else {
actualcodepath = path
codegopath, err = getCodeFromFS(path)
}
if err != nil {
return "", fmt.Errorf("Error getting code %s", err)
}
tmppath := filepath.Join(codegopath, "src", actualcodepath)
if err = isCodeExist(tmppath); err != nil {
return "", fmt.Errorf("code does not exist %s", err)
}
ctorbytes, err := proto.Marshal(ctor)
if err != nil {
return "", fmt.Errorf("Error marshalling constructor: %s", err)
}
hash := util.GenerateHashFromSignature(actualcodepath, ctorbytes)
hash, err = hashFilesInDir(filepath.Join(codegopath, "src"), actualcodepath, hash, tw)
if err != nil {
return "", fmt.Errorf("Could not get hashcode for %s - %s\n", path, err)
}
return hex.EncodeToString(hash[:]), nil
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_sample.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
github/github.go
|
package github
import (
"context"
"fmt"
"log"
"os"
"strconv"
"time"
"gopkg.in/src-d/go-billy.v4/memfs"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/storage/memory"
)
//go:generate mockgen -source github.go -destination mock/mock_github.go
type Client interface {
PushNewFileToBranch(commit *GitCommit) error
CreateNewPullRequest(commit *GitCommit) error
}
type client struct {
gCli *github.Client
Github
}
type Github struct {
Organization string
Repo string
Token string
User string
}
type File struct {
Name string
Path string
Content []byte
}
func (f File) FullPath() string {
return f.Path + f.Name
}
type GitCommit struct {
CommitAuthorName string
CommitAuthorEmail string
Branch string
FileName string
FileContent string
Files []*File
CommitMessage string
PullRequestMessage string
PullRequestTitle string
}
func NewGitCommit(files []*File, message string) *GitCommit {
return &GitCommit{
// TODO: set config
CommitAuthorName: "hayashiki",
CommitAuthorEmail: os.Getenv("EMAIL"),
Files: files,
Branch: strconv.FormatInt(time.Now().UnixNano(), 10),
CommitMessage: message,
PullRequestMessage: message,
PullRequestTitle: message,
}
}
func NewClient(org, repo, token, user string) Client {
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
tc := oauth2.NewClient(ctx, ts)
cli := github.NewClient(tc)
return &client{
cli,
Github{
org,
repo,
token,
user,
},
}
}
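// A minimal usage sketch (illustrative only; the org/repo/user values and the
// GITHUB_TOKEN variable below are assumptions, not part of this package):
//
//	files := []*File{{Name: "report.md", Path: "docs/", Content: []byte("# Report")}}
//	commit := NewGitCommit(files, "add weekly report")
//	cli := NewClient("my-org", "my-repo", os.Getenv("GITHUB_TOKEN"), "my-user")
//	if err := cli.PushNewFileToBranch(commit); err == nil {
//		_ = cli.CreateNewPullRequest(commit)
//	}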
func (c *client) PushNewFileToBranch(commit *GitCommit) error {
f := memfs.New()
url := fmt.Sprintf("https://%s:%[email protected]/%s/%s.git", c.User, c.Token, c.Organization, c.Repo)
repo, err := git.Clone(memory.NewStorage(), f, &git.CloneOptions{
URL: url,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
})
if err != nil {
return err
}
w, err := repo.Worktree()
if err != nil {
return err
}
err = w.Checkout(&git.CheckoutOptions{
Create: true,
Branch: plumbing.NewBranchReferenceName(commit.Branch),
})
if err != nil {
return err
}
for _, file := range commit.Files {
log.Printf("File name is %s", file.FullPath())
// TODO: figure out how to defer i.Close() correctly inside this range loop
i, err := f.OpenFile(file.FullPath(), os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
log.Printf("file.OpenFile err: %v", file.Content)
return err
}
if _, err = i.Write(file.Content); err != nil {
log.Printf("git.Writes err: %v", file.Content)
return err
}
if _, err := w.Add(file.FullPath()); err != nil {
log.Printf("git.Add err: %v", file.Name)
return err
}
}
ref := plumbing.ReferenceName(commit.Branch)
if err == nil {
hash, err := w.Commit(commit.CommitMessage, &git.CommitOptions{
Author: &object.Signature{
Name: commit.CommitAuthorName,
Email: commit.CommitAuthorEmail,
When: time.Now(),
},
})
if err != nil {
return err
}
repo.Storer.SetReference(plumbing.NewReferenceFromStrings(commit.Branch, hash.String()))
}
originRefSpec := fmt.Sprintf("refs/heads/%s", commit.Branch)
remote, err := repo.Remote("origin")
if err == nil {
err = remote.Push(&git.PushOptions{
Progress: os.Stdout,
RefSpecs: []config.RefSpec{
config.RefSpec(ref + ":" + plumbing.ReferenceName(originRefSpec)),
},
})
}
if err != nil {
return err
}
return nil
}
func (c *client) CreateNewPullRequest(commit *GitCommit) error {
newPR := &github.NewPullRequest{
Title: github.String(commit.PullRequestTitle),
Head: github.String(commit.Branch),
Base: github.String("master"),
Body: github.String(commit.PullRequestMessage),
MaintainerCanModify: github.Bool(true),
}
pr, _, err := c.gCli.PullRequests.Create(context.Background(), c.Organization, c.Repo, newPR)
if err != nil {
return err
}
// TODO: merging the PR at the same time it is opened causes an error, so only push the branch for now
//o := &github.PullRequestOptions{
// SHA: pr.GetHead().GetSHA(),
// MergeMethod: "rebase",
//}
//
//_, _, err = c.gCli.PullRequests.Merge(context.Background(), c.Organization, c.Repo, pr.GetNumber(), "Merged!", o)
//
//if err != nil {
// return err
//}
log.Printf("PR created: %s\n", pr.GetHTMLURL())
return nil
}
|
[
"\"EMAIL\""
] |
[] |
[
"EMAIL"
] |
[]
|
["EMAIL"]
|
go
| 1 | 0 | |
src/test/java/com/spotify/docker/client/DefaultDockerClientTest.java
|
/*
* Copyright (c) 2014 Spotify AB.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.docker.client;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.io.Resources;
import com.google.common.net.HostAndPort;
import com.google.common.util.concurrent.SettableFuture;
import com.fasterxml.jackson.databind.util.StdDateFormat;
import com.spotify.docker.client.messages.Container;
import com.spotify.docker.client.messages.ContainerConfig;
import com.spotify.docker.client.messages.ContainerCreation;
import com.spotify.docker.client.messages.ContainerExit;
import com.spotify.docker.client.messages.ContainerInfo;
import com.spotify.docker.client.messages.HostConfig;
import com.spotify.docker.client.messages.Image;
import com.spotify.docker.client.messages.ImageInfo;
import com.spotify.docker.client.messages.Info;
import com.spotify.docker.client.messages.ProgressMessage;
import com.spotify.docker.client.messages.RemovedImage;
import com.spotify.docker.client.messages.Version;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.URI;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static com.google.common.base.Optional.fromNullable;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.spotify.docker.client.DefaultDockerClient.NO_TIMEOUT;
import static com.spotify.docker.client.DockerClient.BuildParameter.FORCE_RM;
import static com.spotify.docker.client.DockerClient.BuildParameter.NO_CACHE;
import static com.spotify.docker.client.DockerClient.BuildParameter.NO_RM;
import static com.spotify.docker.client.DockerClient.ListImagesParam.allImages;
import static com.spotify.docker.client.DockerClient.ListImagesParam.danglingImages;
import static com.spotify.docker.client.messages.RemovedImage.Type.DELETED;
import static com.spotify.docker.client.messages.RemovedImage.Type.UNTAGGED;
import static java.lang.Long.toHexString;
import static java.lang.String.format;
import static java.lang.System.getenv;
import static org.apache.commons.lang.StringUtils.containsIgnoreCase;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.any;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;
import static org.hamcrest.collection.IsEmptyCollection.empty;
import static org.hamcrest.collection.IsMapContaining.hasEntry;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class DefaultDockerClientTest {
public static final String DOCKER_ENDPOINT;
public static final String DOCKER_CERT_PATH;
@Rule public ExpectedException exception = ExpectedException.none();
static {
// Parse DOCKER_HOST
DOCKER_CERT_PATH = env("DOCKER_CERT_PATH", "");
int dockerPort = Integer.valueOf(env("DOCKER_PORT", "2375"));
String dockerHost;
if (System.getProperty("os.name").toLowerCase().equals("linux")) {
dockerHost = env("DOCKER_HOST", "unix:///var/run/docker.sock");
} else {
dockerHost = env("DOCKER_HOST", ":" + dockerPort);
}
if (!dockerHost.startsWith("unix://")) {
final String stripped = dockerHost.replaceAll(".*://", "");
final HostAndPort hostAndPort = HostAndPort.fromString(stripped);
final String host = hostAndPort.getHostText();
final String dockerAddress = isNullOrEmpty(host) ? "localhost" : host;
String scheme;
if (!isNullOrEmpty(DOCKER_CERT_PATH)) {
scheme = "https";
} else {
scheme = "http";
}
DOCKER_ENDPOINT = format("%s://%s:%d", scheme, dockerAddress,
hostAndPort.getPortOrDefault(dockerPort));
} else {
DOCKER_ENDPOINT = dockerHost;
}
}
private static String env(final String key, final String defaultValue) {
return fromNullable(getenv(key)).or(defaultValue);
}
private final String nameTag = toHexString(ThreadLocalRandom.current().nextLong());
private DefaultDockerClient sut;
@Before
public void setup() throws Exception {
final DefaultDockerClient.Builder builder = DefaultDockerClient.builder()
.uri(DOCKER_ENDPOINT);
if (!isNullOrEmpty(DOCKER_CERT_PATH)) {
builder.dockerCertificates(new DockerCertificates(Paths.get(DOCKER_CERT_PATH)));
}
sut = builder.build();
}
@After
public void tearDown() throws Exception {
// Remove containers
final List<Container> containers = sut.listContainers(DockerClient.ListContainersParam.allContainers());
for (Container container : containers) {
final ContainerInfo info = sut.inspectContainer(container.id());
if (info != null && info.name().contains(nameTag)) {
sut.killContainer(info.id());
sut.removeContainer(info.id());
}
}
// Close the client
sut.close();
}
@Test
public void testPullWithTag() throws Exception {
sut.pull("busybox:buildroot-2014.02");
}
@Test
public void testPingReturnsOk() throws Exception {
final String pingResponse = sut.ping();
assertThat(pingResponse, equalTo("OK"));
}
@Test
public void testVersion() throws Exception {
final Version version = sut.version();
assertThat(version.apiVersion(), not(isEmptyOrNullString()));
assertThat(version.arch(), not(isEmptyOrNullString()));
assertThat(version.gitCommit(), not(isEmptyOrNullString()));
assertThat(version.goVersion(), not(isEmptyOrNullString()));
assertThat(version.kernelVersion(), not(isEmptyOrNullString()));
assertThat(version.os(), not(isEmptyOrNullString()));
assertThat(version.version(), not(isEmptyOrNullString()));
}
@Test
public void testInfo() throws Exception {
final Info info = sut.info();
assertThat(info.executionDriver(), not(isEmptyOrNullString()));
assertThat(info.initPath(), not(isEmptyOrNullString()));
assertThat(info.kernelVersion(), not(isEmptyOrNullString()));
assertThat(info.storageDriver(), not(isEmptyOrNullString()));
assertThat(info.sockets(), not(empty()));
}
@Test
public void testRemoveImage() throws Exception {
sut.pull("cirros");
final String imageLatest = "cirros:latest";
final String imageVersion = "cirros:0.3.0";
final Set<RemovedImage> removedImages = Sets.newHashSet();
removedImages.addAll(sut.removeImage(imageLatest));
removedImages.addAll(sut.removeImage(imageVersion));
assertThat(removedImages, containsInAnyOrder(
new RemovedImage(UNTAGGED, imageLatest),
new RemovedImage(UNTAGGED, imageVersion),
new RemovedImage(DELETED,
"d20c88c95b28c21fff7e3d2d98a9ab85daebad04d6185ff572068167c20d7374"),
new RemovedImage(DELETED,
"47595debf9e9440a28b20af6e9b2f83ca4d0ce4902bcea5e506c2ad42374bf33"),
new RemovedImage(DELETED,
"f8184986c5454e9486bb32155cf0eb69b477893cc0717a29f1ff504f44e026d8"),
new RemovedImage(DELETED,
"5dd62fd3b727a250becfbb341e80fa7047a9fb5812f7ee184973d2e74d6bfd4d"),
new RemovedImage(DELETED,
"16a464be5494a73be34a3055b77ae00d072a4f9389897d1a786de0079219aaeb")
));
// Try to inspect deleted image and make sure ImageNotFoundException is thrown
try {
sut.inspectImage(imageLatest);
fail("inspectImage should have thrown ImageNotFoundException");
} catch (ImageNotFoundException e) {
// we should get exception because we deleted image
}
}
@Test
public void testTag() throws Exception {
sut.pull("busybox");
// Tag image
final String newImageName = "testRepo:testTag";
sut.tag("busybox", newImageName);
// Verify tag was successful by trying to remove it.
final RemovedImage removedImage = getOnlyElement(sut.removeImage(newImageName));
assertThat(removedImage, equalTo(new RemovedImage(UNTAGGED, newImageName)));
}
@Test
public void testInspectImage() throws Exception {
sut.pull("busybox");
final ImageInfo info = sut.inspectImage("busybox");
assertThat(info, notNullValue());
assertThat(info.architecture(), not(isEmptyOrNullString()));
assertThat(info.author(), not(isEmptyOrNullString()));
assertThat(info.config(), notNullValue());
assertThat(info.container(), not(isEmptyOrNullString()));
assertThat(info.containerConfig(), notNullValue());
assertThat(info.comment(), notNullValue());
assertThat(info.created(), notNullValue());
assertThat(info.dockerVersion(), not(isEmptyOrNullString()));
assertThat(info.id(), not(isEmptyOrNullString()));
assertThat(info.os(), equalTo("linux"));
assertThat(info.parent(), not(isEmptyOrNullString()));
assertThat(info.size(), notNullValue());
}
@Test
public void testCustomProgressMessageHandler() throws Exception {
final List<ProgressMessage> messages = new ArrayList<>();
sut.pull("busybox", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
messages.add(message);
}
});
// Verify that we have multiple messages, and each one has a non-null field
assertThat(messages, not(empty()));
for (ProgressMessage message : messages) {
assertTrue(message.error() != null ||
message.id() != null ||
message.progress() != null ||
message.progressDetail() != null ||
message.status() != null ||
message.stream() != null);
}
}
@Test
public void testBuildImageId() throws Exception {
final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
final AtomicReference<String> imageIdFromMessage = new AtomicReference<>();
final String returnedImageId = sut.build(
Paths.get(dockerDirectory), "test", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
final String imageId = message.buildImageId();
if (imageId != null) {
imageIdFromMessage.set(imageId);
}
}
});
assertThat(returnedImageId, is(imageIdFromMessage.get()));
}
@Test
public void testBuildName() throws Exception {
final String imageName = "test-build-name";
final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
final String imageId = sut.build(Paths.get(dockerDirectory), imageName);
final ImageInfo info = sut.inspectImage(imageName);
assertThat(info.id(), startsWith(imageId));
}
@Test
public void testBuildNoCache() throws Exception {
final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
final String usingCache = "Using cache";
// Build once to make sure we have cached images.
sut.build(Paths.get(dockerDirectory));
// Build again and make sure we used cached image by parsing output.
final AtomicBoolean usedCache = new AtomicBoolean(false);
sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
if (message.stream().contains(usingCache)) {
usedCache.set(true);
}
}
});
assertTrue(usedCache.get());
// Build again with NO_CACHE set, and verify we don't use cache.
sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
assertThat(message.stream(), not(containsString(usingCache)));
}
}, NO_CACHE);
}
@Test
public void testBuildNoRm() throws Exception {
final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
final String removingContainers = "Removing intermediate container";
// Test that intermediate containers are removed with FORCE_RM by parsing output. We must
// set NO_CACHE so that docker will generate some containers to remove.
final AtomicBoolean removedContainer = new AtomicBoolean(false);
sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
if (containsIgnoreCase(message.stream(), removingContainers)) {
removedContainer.set(true);
}
}
}, NO_CACHE, FORCE_RM);
assertTrue(removedContainer.get());
// Set NO_RM and verify we don't get message that containers were removed.
sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
@Override
public void progress(ProgressMessage message) throws DockerException {
assertThat(message.stream(), not(containsString(removingContainers)));
}
}, NO_CACHE, NO_RM);
}
@Test
public void testGetImageIdFromBuild() {
// Include a new line because that's what docker returns.
final ProgressMessage message1 = new ProgressMessage()
.stream("Successfully built 2d6e00052167\n");
assertThat(message1.buildImageId(), is("2d6e00052167"));
final ProgressMessage message2 = new ProgressMessage().id("123");
assertThat(message2.buildImageId(), nullValue());
final ProgressMessage message3 = new ProgressMessage().stream("Step 2 : CMD[]");
assertThat(message3.buildImageId(), nullValue());
}
@Test
public void testAnsiProgressHandler() throws Exception {
final ByteArrayOutputStream out = new ByteArrayOutputStream();
sut.pull("busybox", new AnsiProgressHandler(new PrintStream(out)));
// The progress handler uses ascii escape characters to move the cursor around to nicely print
// progress bars. This is hard to test programmatically, so let's just verify the output
// contains some expected phrases.
assertThat(out.toString(), allOf(containsString("Pulling repository busybox"),
containsString("Download complete")));
}
@Test
public void testExportContainer() throws Exception {
// Pull image
sut.pull("busybox");
// Create container
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
ImmutableSet.Builder<String> files = ImmutableSet.builder();
try (TarArchiveInputStream tarStream = new TarArchiveInputStream(sut.exportContainer(id))) {
TarArchiveEntry entry;
while ((entry = tarStream.getNextTarEntry()) != null) {
files.add(entry.getName());
}
}
// Check that some common files exist
assertThat(files.build(), both(hasItem("bin/")).and(hasItem("bin/sh")));
}
@Test
public void testCopyContainer() throws Exception {
// Pull image
sut.pull("busybox");
// Create container
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
ImmutableSet.Builder<String> files = ImmutableSet.builder();
try (TarArchiveInputStream tarStream = new TarArchiveInputStream(sut.copyContainer(id, "/usr/bin"))) {
TarArchiveEntry entry;
while ((entry = tarStream.getNextTarEntry()) != null) {
files.add(entry.getName());
}
}
// Check that some common files exist
assertThat(files.build(), both(hasItem("bin/")).and(hasItem("bin/wc")));
}
@Test
public void testStopContainer() throws Exception {
sut.pull("busybox");
final ContainerConfig containerConfig = ContainerConfig.builder()
.image("busybox")
// make sure the container's busy doing something upon startup
.cmd("sh", "-c", "while :; do sleep 1; done")
.build();
final String containerName = randomName();
final ContainerCreation containerCreation = sut.createContainer(containerConfig, containerName);
final String containerId = containerCreation.id();
sut.startContainer(containerId);
// Must be running
{
final ContainerInfo containerInfo = sut.inspectContainer(containerId);
assertThat(containerInfo.state().running(), equalTo(true));
}
sut.stopContainer(containerId, 5);
// Must no longer be running
{
final ContainerInfo containerInfo = sut.inspectContainer(containerId);
assertThat(containerInfo.state().running(), equalTo(false));
}
}
@Test
public void testRestartContainer() throws Exception {
sut.pull("busybox");
final ContainerConfig containerConfig = ContainerConfig.builder()
.image("busybox")
// make sure the container's busy doing something upon startup
.cmd("sh", "-c", "while :; do sleep 1; done")
.build();
final String containerName = randomName();
final ContainerCreation containerCreation = sut.createContainer(containerConfig, containerName);
final String containerId = containerCreation.id();
sut.startContainer(containerId);
// Must be running
{
final ContainerInfo containerInfo = sut.inspectContainer(containerId);
assertThat(containerInfo.state().running(), equalTo(true));
}
final ContainerInfo tempContainerInfo = sut.inspectContainer(containerId);
final Integer originalPid = tempContainerInfo.state().pid();
sut.restartContainer(containerId);
// Should be running again, under a new PID
{
final ContainerInfo containerInfoLatest = sut.inspectContainer(containerId);
assertTrue(containerInfoLatest.state().running());
assertThat(containerInfoLatest.state().pid(), not(equalTo(originalPid)));
}
}
@Test
public void integrationTest() throws Exception {
// Pull image
sut.pull("busybox");
// Create container
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.cmd("sh", "-c", "while :; do sleep 1; done")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
assertThat(creation.getWarnings(), anyOf(is(empty()), is(nullValue())));
assertThat(id, is(any(String.class)));
// Inspect using container ID
{
final ContainerInfo info = sut.inspectContainer(id);
assertThat(info.id(), equalTo(id));
assertThat(info.config().image(), equalTo(config.image()));
assertThat(info.config().cmd(), equalTo(config.cmd()));
}
// Inspect using container name
{
final ContainerInfo info = sut.inspectContainer(name);
assertThat(info.config().image(), equalTo(config.image()));
assertThat(info.config().cmd(), equalTo(config.cmd()));
}
// Start container
sut.startContainer(id);
// Kill container
sut.killContainer(id);
// Remove the container
sut.removeContainer(id);
// Verify that the container is gone
exception.expect(ContainerNotFoundException.class);
sut.inspectContainer(id);
}
@Test
public void interruptTest() throws Exception {
// Pull image
sut.pull("busybox");
// Create container
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.cmd("sh", "-c", "while :; do sleep 1; done")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
// Start container
sut.startContainer(id);
// Wait for container on a thread
final ExecutorService executorService = Executors.newSingleThreadExecutor();
final SettableFuture<Boolean> started = SettableFuture.create();
final SettableFuture<Boolean> interrupted = SettableFuture.create();
final Future<ContainerExit> exitFuture = executorService.submit(new Callable<ContainerExit>() {
@Override
public ContainerExit call() throws Exception {
try {
started.set(true);
return sut.waitContainer(id);
} catch (InterruptedException e) {
interrupted.set(true);
throw e;
}
}
});
// Interrupt waiting thread
started.get();
executorService.shutdownNow();
try {
exitFuture.get();
fail();
} catch (ExecutionException e) {
assertThat(e.getCause(), instanceOf(InterruptedException.class));
}
// Verify that the thread was interrupted
assertThat(interrupted.get(), is(true));
}
@Test(expected = DockerTimeoutException.class)
public void testConnectTimeout() throws Exception {
// Attempt to connect to reserved IP -> should timeout
try (final DefaultDockerClient connectTimeoutClient = DefaultDockerClient.builder()
.uri("http://240.0.0.1:2375")
.connectTimeoutMillis(100)
.readTimeoutMillis(NO_TIMEOUT)
.build()) {
connectTimeoutClient.version();
}
}
@Test(expected = DockerTimeoutException.class)
public void testReadTimeout() throws Exception {
try (final ServerSocket s = new ServerSocket()) {
// Bind and listen but do not accept -> read will time out.
s.bind(new InetSocketAddress("127.0.0.1", 0));
awaitConnectable(s.getInetAddress(), s.getLocalPort());
final DockerClient connectTimeoutClient = DefaultDockerClient.builder()
.uri("http://127.0.0.1:" + s.getLocalPort())
.connectTimeoutMillis(NO_TIMEOUT)
.readTimeoutMillis(100)
.build();
connectTimeoutClient.version();
}
}
@Test
public void testWaitContainer() throws Exception {
sut.pull("busybox");
// Create container
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.cmd("sh", "-c", "while :; do sleep 1; done")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
// Start the container
sut.startContainer(id);
// Wait for container on a thread
final ExecutorService executorService = Executors.newSingleThreadExecutor();
final Future<ContainerExit> exitFuture = executorService.submit(new Callable<ContainerExit>() {
@Override
public ContainerExit call() throws Exception {
return sut.waitContainer(id);
}
});
// Wait for 40 seconds, then kill the container
Thread.sleep(40000);
sut.killContainer(id);
// Ensure that waiting on the container worked without exception
exitFuture.get();
}
@Test
public void testInspectContainerWithExposedPorts() throws Exception {
sut.pull("rohan/memcached-mini");
final ContainerConfig config = ContainerConfig.builder()
.image("rohan/memcached-mini")
.build();
final ContainerCreation container = sut.createContainer(config, randomName());
sut.startContainer(container.id());
final ContainerInfo containerInfo = sut.inspectContainer(container.id());
assertThat(containerInfo, notNullValue());
assertThat(containerInfo.networkSettings().ports(), hasEntry("11211/tcp", null));
}
@Test
public void testContainerWithHostConfig() throws Exception {
sut.pull("busybox");
final ContainerConfig config = ContainerConfig.builder()
.image("busybox")
.build();
final String name = randomName();
final ContainerCreation creation = sut.createContainer(config, name);
final String id = creation.id();
final boolean privileged = true;
final boolean publishAllPorts = true;
final String dns = "1.2.3.4";
final HostConfig expected = HostConfig.builder()
.privileged(privileged)
.publishAllPorts(publishAllPorts)
.dns(dns)
.build();
sut.startContainer(id, expected);
final HostConfig actual = sut.inspectContainer(id).hostConfig();
assertThat(actual.privileged(), equalTo(expected.privileged()));
assertThat(actual.publishAllPorts(), equalTo(expected.publishAllPorts()));
assertThat(actual.dns(), equalTo(expected.dns()));
}
@Test
public void testListImages() throws Exception {
sut.pull("busybox");
final List<Image> images = sut.listImages();
assertThat(images.size(), greaterThan(0));
// Verify that image contains valid values
final Image image = images.get(0);
assertThat(image.virtualSize(), greaterThan(0L));
assertThat(image.created(), not(isEmptyOrNullString()));
assertThat(image.id(), not(isEmptyOrNullString()));
assertThat(image.parentId(), not(isEmptyOrNullString()));
// Using allImages() should give us more images
final List<Image> allImages = sut.listImages(allImages());
assertThat(allImages.size(), greaterThan(images.size()));
// Including just dangling images should give us fewer images
final List<Image> danglingImages = sut.listImages(danglingImages());
assertThat(danglingImages.size(), lessThan(images.size()));
// Specifying both allImages() and danglingImages() should give us only dangling images
final List<Image> allAndDanglingImages = sut.listImages(allImages(), danglingImages());
assertThat(allAndDanglingImages.size(), equalTo(danglingImages.size()));
}
@Test
public void testDockerDateFormat() throws Exception {
// This is the created date for busybox converted from nanoseconds to milliseconds
final Date expected = new StdDateFormat().parse("2014-10-01T20:46:08.914Z");
final DockerDateFormat dateFormat = new DockerDateFormat();
// Verify DockerDateFormat handles millisecond precision correctly
final Date milli = dateFormat.parse("2014-10-01T20:46:08.914Z");
assertThat(milli, equalTo(expected));
// Verify DockerDateFormat converts nanosecond precision down to millisecond precision
final Date nano = dateFormat.parse("2014-10-01T20:46:08.914288461Z");
assertThat(nano, equalTo(expected));
// Verify the formatter works when used with the client
sut.pull("busybox");
final ImageInfo imageInfo = sut.inspectImage("busybox");
assertThat(imageInfo.created(), equalTo(expected));
}
@Test
public void testSsl() throws Exception {
// Build a run a container that contains a Docker instance configured with our SSL cert/key
final String imageName = "test-docker-ssl";
final String expose = "2376/tcp";
final String dockerDirectory = Resources.getResource("dockerSslDirectory").getPath();
sut.build(Paths.get(dockerDirectory), imageName);
final ContainerConfig containerConfig = ContainerConfig.builder()
.image(imageName)
.exposedPorts(expose)
.build();
final String containerName = randomName();
final ContainerCreation containerCreation = sut.createContainer(containerConfig, containerName);
final String containerId = containerCreation.id();
final HostConfig hostConfig = HostConfig.builder()
.privileged(true)
.publishAllPorts(true)
.build();
sut.startContainer(containerId, hostConfig);
// Determine where the Docker instance inside the container we just started is exposed
final String host;
if (DOCKER_ENDPOINT.startsWith("unix://")) {
host = "localhost";
} else {
host = URI.create(DOCKER_ENDPOINT).getHost();
}
final ContainerInfo containerInfo = sut.inspectContainer(containerId);
assertThat(containerInfo.state().running(), equalTo(true));
final String port = containerInfo.networkSettings().ports().get(expose).get(0).hostPort();
// Try to connect using SSL and our known cert/key
final DockerCertificates certs = new DockerCertificates(Paths.get(dockerDirectory));
final DockerClient c = new DefaultDockerClient(URI.create(format("https://%s:%s", host, port)),
certs);
assertThat(c.ping(), equalTo("OK"));
sut.stopContainer(containerId, 10);
}
private String randomName() {
return nameTag + '-' + toHexString(ThreadLocalRandom.current().nextLong());
}
private void awaitConnectable(final InetAddress address, final int port)
throws InterruptedException {
while (true) {
try (Socket ignored = new Socket(address, port)) {
return;
} catch (IOException e) {
Thread.sleep(100);
}
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
rplScripts/testRplProcPaint.py
|
# testRplProcPaint.py
#
# Test processing a rpl file with DTSA-II
# Note : rpl is converted from Oxford Paint set:
# 1. To Inca project, exported to .rpl/.raw Ripple file pair.
# 2. Converted with hyperspy to vector.
# 3. .rpl cleaned up manually... (need to fix...)
#
# Create the Oxford-Paint detector (just add it to the default probe
# instrument) by importing the oxford.msa spectrum from DTSA's add
# detector dialog. You need to manually set the size to 80 mm2 and
# the window to Moxtek manufacturers.
#
# Date Ver Who Notes
# 2015-05-22 0.90 JRM Initial example. Verified with Iona v.2015-05-01
# Longest part seemed to be computing the max px spec
#
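# A rough sketch of conversion steps 2-3 above (assumes hyperspy is available;
# file names are illustrative and the .rpl keys may still need hand edits):
#
#   import hyperspy.api as hs
#   s = hs.load("paint.rpl")        # load the .rpl/.raw pair exported from Inca
#   s.save("paint-vec.rpl")         # re-save as a Ripple pair for DTSA-II
#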
import os
import shutil
import dtsa2.hyperTools as ht
import dtsa2 as dt2
import gov.nist.microanalysis.EPQLibrary as epq
import gov.nist.microanalysis.EPQTools as ept
gitDir = os.environ['GIT_HOME']
relPrj = "/dtsa2Scripts/rplScripts"
datDir = gitDir + relPrj
os.chdir(datDir)
pyrDir = datDir + "/testRplProcPaint Results"
rplFil = datDir + "/paint-vec.rpl"
e0 = 20.0
pc = 1.0
lt = 30.0
det = findDetector("Oxford-Paint")
if(det.toString()[0:4] != 'Oxfo'):
print("You need to add the Oxford-Paint detector. It is using the default...")
# Let's start clean...
DataManager.clearSpectrumList()
rs = ht.openRipple(rplFil, e0, pc, lt, det)
rs.setPosition(111, 35) # go to a position
dt2.display(rs)
ary = wrap(rs).toDouble()
print("Print the min and max")
print(min(ary))
print(max(ary))
dir(rs)
print(dir(rs))
print("Computing maximum pixel spectrum")
mps = wrap(ht.maxPixel(rs))
mps.rename("Paint maximum px")
dt2.display(mps)
# clean up cruft
shutil.rmtree(pyrDir)
print "Done!"
|
[] |
[] |
[
"GIT_HOME"
] |
[]
|
["GIT_HOME"]
|
python
| 1 | 0 | |
storage/sealer/manager_calltracker.go
|
package sealer
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type WorkID struct {
Method sealtasks.TaskType
Params string // json [...params]
}
func (w WorkID) String() string {
return fmt.Sprintf("%s(%s)", w.Method, w.Params)
}
var _ fmt.Stringer = &WorkID{}
type WorkStatus string
const (
wsStarted WorkStatus = "started" // task started, not scheduled/running on a worker yet
wsRunning WorkStatus = "running" // task running on a worker, waiting for worker return
wsDone WorkStatus = "done" // task returned from the worker, results available
)
type WorkState struct {
ID WorkID
Status WorkStatus
WorkerCall storiface.CallID // Set when entering wsRunning
WorkError string // Status = wsDone, set when failed to start work
WorkerHostname string // hostname of last worker handling this job
StartTime int64 // unix seconds
}
func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) {
pb, err := json.Marshal(params)
if err != nil {
return WorkID{}, xerrors.Errorf("marshaling work params: %w", err)
}
if len(pb) > 256 {
s := sha256.Sum256(pb)
pb = []byte(hex.EncodeToString(s[:]))
}
return WorkID{
Method: method,
Params: string(pb),
}, nil
}
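// Illustration (hypothetical values): newWorkID(method, "a", 2) produces
// WorkID{Method: method, Params: `["a",2]`}; if the marshaled params exceed
// 256 bytes, Params becomes the hex-encoded SHA-256 of that JSON instead.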
func (m *Manager) setupWorkTracker() {
m.workLk.Lock()
defer m.workLk.Unlock()
var ids []WorkState
if err := m.work.List(&ids); err != nil {
log.Error("getting work IDs") // quite bad
return
}
for _, st := range ids {
wid := st.ID
if os.Getenv("LOTUS_MINER_ABORT_UNFINISHED_WORK") == "1" {
st.Status = wsDone
}
switch st.Status {
case wsStarted:
log.Warnf("dropping non-running work %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cleannig up work state for %s", wid)
}
case wsDone:
// can happen after restart, abandoning work, and another restart
log.Warnf("dropping done work, no result, wid %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cleannig up work state for %s", wid)
}
case wsRunning:
m.callToWork[st.WorkerCall] = wid
}
}
}
// returns wait=true when the task is already tracked/running
func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params ...interface{}) (wid WorkID, wait bool, cancel func(), err error) {
wid, err = newWorkID(method, params)
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("creating WorkID: %w", err)
}
m.workLk.Lock()
defer m.workLk.Unlock()
have, err := m.work.Has(wid)
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("failed to check if the task is already tracked: %w", err)
}
if !have {
err := m.work.Begin(wid, &WorkState{
ID: wid,
Status: wsStarted,
})
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("failed to track task start: %w", err)
}
return wid, false, func() {
m.workLk.Lock()
defer m.workLk.Unlock()
have, err := m.work.Has(wid)
if err != nil {
log.Errorf("cancel: work has error: %+v", err)
return
}
if !have {
return // expected / happy path
}
var ws WorkState
if err := m.work.Get(wid).Get(&ws); err != nil {
log.Errorf("cancel: get work %s: %+v", wid, err)
return
}
switch ws.Status {
case wsStarted:
log.Warnf("canceling started (not running) work %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err)
return
}
case wsDone:
// TODO: still remove?
log.Warnf("cancel called on work %s in 'done' state", wid)
case wsRunning:
log.Warnf("cancel called on work %s in 'running' state (manager shutting down?)", wid)
}
}, nil
}
// already started
return wid, true, func() {
// TODO
}, nil
}
func (m *Manager) startWork(ctx context.Context, w Worker, wk WorkID) func(callID storiface.CallID, err error) error {
return func(callID storiface.CallID, err error) error {
var hostname string
info, ierr := w.Info(ctx)
if ierr != nil {
hostname = "[err]"
} else {
hostname = info.Hostname
}
m.workLk.Lock()
defer m.workLk.Unlock()
if err != nil {
merr := m.work.Get(wk).Mutate(func(ws *WorkState) error {
ws.Status = wsDone
ws.WorkError = err.Error()
return nil
})
if merr != nil {
return xerrors.Errorf("failed to start work and to track the error; merr: %+v, err: %w", merr, err)
}
return err
}
err = m.work.Get(wk).Mutate(func(ws *WorkState) error {
_, ok := m.results[wk]
if ok {
log.Warn("work returned before we started tracking it")
ws.Status = wsDone
} else {
ws.Status = wsRunning
}
ws.WorkerCall = callID
ws.WorkerHostname = hostname
ws.StartTime = time.Now().Unix()
return nil
})
if err != nil {
return xerrors.Errorf("registering running work: %w", err)
}
m.callToWork[callID] = wk
return nil
}
}
func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) {
m.workLk.Lock()
var ws WorkState
if err := m.work.Get(wid).Get(&ws); err != nil {
m.workLk.Unlock()
return nil, xerrors.Errorf("getting work status: %w", err)
}
if ws.Status == wsStarted {
m.workLk.Unlock()
return nil, xerrors.Errorf("waitWork called for work in 'started' state")
}
// sanity check
wk := m.callToWork[ws.WorkerCall]
if wk != wid {
m.workLk.Unlock()
return nil, xerrors.Errorf("wrong callToWork mapping for call %s; expected %s, got %s", ws.WorkerCall, wid, wk)
}
// make sure we don't have the result ready
cr, ok := m.callRes[ws.WorkerCall]
if ok {
delete(m.callToWork, ws.WorkerCall)
if len(cr) == 1 {
err := m.work.Get(wk).End()
if err != nil {
m.workLk.Unlock()
// Not great, but not worth discarding potentially multi-hour computation over this
log.Errorf("marking work as done: %+v", err)
}
res := <-cr
delete(m.callRes, ws.WorkerCall)
m.workLk.Unlock()
return res.r, res.err
}
m.workLk.Unlock()
return nil, xerrors.Errorf("something else in waiting on callRes")
}
done := func() {
delete(m.results, wid)
_, ok := m.callToWork[ws.WorkerCall]
if ok {
delete(m.callToWork, ws.WorkerCall)
}
err := m.work.Get(wk).End()
if err != nil {
// Not great, but not worth discarding potentially multi-hour computation over this
log.Errorf("marking work as done: %+v", err)
}
}
// the result can already be there if the work was running, manager restarted,
// and the worker has delivered the result before we entered waitWork
res, ok := m.results[wid]
if ok {
done()
m.workLk.Unlock()
return res.r, res.err
}
ch, ok := m.waitRes[wid]
if !ok {
ch = make(chan struct{})
m.waitRes[wid] = ch
}
m.workLk.Unlock()
select {
case <-ch:
m.workLk.Lock()
defer m.workLk.Unlock()
res := m.results[wid]
done()
return res.r, res.err
case <-ctx.Done():
return nil, xerrors.Errorf("waiting for work result: %w", ctx.Err())
}
}
func (m *Manager) waitSimpleCall(ctx context.Context) func(callID storiface.CallID, err error) (interface{}, error) {
return func(callID storiface.CallID, err error) (interface{}, error) {
if err != nil {
return nil, err
}
return m.waitCall(ctx, callID)
}
}
func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interface{}, error) {
m.workLk.Lock()
_, ok := m.callToWork[callID]
if ok {
m.workLk.Unlock()
return nil, xerrors.Errorf("can't wait for calls related to work")
}
ch, ok := m.callRes[callID]
if !ok {
ch = make(chan result, 1)
m.callRes[callID] = ch
}
m.workLk.Unlock()
defer func() {
m.workLk.Lock()
defer m.workLk.Unlock()
delete(m.callRes, callID)
}()
select {
case res := <-ch:
return res.r, res.err
case <-ctx.Done():
return nil, xerrors.Errorf("waiting for call result: %w", ctx.Err())
}
}
func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r interface{}, cerr *storiface.CallError) error {
res := result{
r: r,
}
if cerr != nil {
res.err = cerr
}
m.sched.workTracker.onDone(ctx, callID)
m.workLk.Lock()
defer m.workLk.Unlock()
wid, ok := m.callToWork[callID]
if !ok {
rch, ok := m.callRes[callID]
if !ok {
rch = make(chan result, 1)
m.callRes[callID] = rch
}
if len(rch) > 0 {
return xerrors.Errorf("callRes channel already has a response")
}
if cap(rch) == 0 {
return xerrors.Errorf("expected rch to be buffered")
}
rch <- res
return nil
}
_, ok = m.results[wid]
if ok {
return xerrors.Errorf("result for call %v already reported", wid)
}
m.results[wid] = res
err := m.work.Get(wid).Mutate(func(ws *WorkState) error {
ws.Status = wsDone
return nil
})
if err != nil {
// in the unlikely case:
// * manager has restarted, and we're still tracking this work, and
// * the work is abandoned (storage-fsm doesn't do a matching call on the sector), and
// * the call is returned from the worker, and
// * this errors
// the user will get jobs stuck in ret-wait state
log.Errorf("marking work as done: %+v", err)
}
_, found := m.waitRes[wid]
if found {
close(m.waitRes[wid])
delete(m.waitRes, wid)
}
return nil
}
func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error {
// TODO: Allow temp error
return m.returnResult(ctx, call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted")))
}
|
[
"\"LOTUS_MINER_ABORT_UNFINISHED_WORK\""
] |
[] |
[
"LOTUS_MINER_ABORT_UNFINISHED_WORK"
] |
[]
|
["LOTUS_MINER_ABORT_UNFINISHED_WORK"]
|
go
| 1 | 0 | |
embed_video/tests/__init__.py
|
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'embed_video.tests.django_settings'
if django.VERSION[:2] >= (1, 7):
django.setup()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
internal/glfw/glfw_windows.go
|
// Copyright 2018 The Ebiten Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glfw
import (
"image"
"image/draw"
"math/bits"
"reflect"
"runtime"
"sync"
"unsafe"
"golang.org/x/sys/windows"
)
type glfwImage struct {
width int32
height int32
pixels uintptr
}
type glfwWindows map[uintptr]*Window
var (
theGLFWWindows = glfwWindows{}
glfwWindowsM sync.Mutex
)
func (w glfwWindows) add(win uintptr) *Window {
if win == 0 {
return nil
}
ww := &Window{w: win}
glfwWindowsM.Lock()
w[win] = ww
glfwWindowsM.Unlock()
return ww
}
func (w glfwWindows) remove(win uintptr) {
glfwWindowsM.Lock()
delete(w, win)
glfwWindowsM.Unlock()
}
func (w glfwWindows) get(win uintptr) *Window {
if win == 0 {
return nil
}
glfwWindowsM.Lock()
ww := w[win]
glfwWindowsM.Unlock()
return ww
}
type Cursor struct {
c uintptr
}
func CreateStandardCursor(shape StandardCursor) *Cursor {
c := glfwDLL.call("glfwCreateStandardCursor", uintptr(shape))
panicError()
return &Cursor{c: c}
}
type Monitor struct {
m uintptr
}
func (m *Monitor) GetPos() (int, int) {
var x, y int32
glfwDLL.call("glfwGetMonitorPos", m.m, uintptr(unsafe.Pointer(&x)), uintptr(unsafe.Pointer(&y)))
panicError()
return int(x), int(y)
}
func (m *Monitor) GetVideoMode() *VidMode {
v := glfwDLL.call("glfwGetVideoMode", m.m)
panicError()
var vals []int32
h := (*reflect.SliceHeader)(unsafe.Pointer(&vals))
h.Data = v
h.Len = 6
h.Cap = 6
return &VidMode{
Width: int(vals[0]),
Height: int(vals[1]),
RedBits: int(vals[2]),
GreenBits: int(vals[3]),
BlueBits: int(vals[4]),
RefreshRate: int(vals[5]),
}
}
type Window struct {
w uintptr
prevSizeCallback SizeCallback
}
func (w *Window) Destroy() {
glfwDLL.call("glfwDestroyWindow", w.w)
panicError()
theGLFWWindows.remove(w.w)
}
func (w *Window) GetAttrib(attrib Hint) int {
r := glfwDLL.call("glfwGetWindowAttrib", w.w, uintptr(attrib))
panicError()
return int(r)
}
func (w *Window) SetAttrib(attrib Hint, value int) {
glfwDLL.call("glfwSetWindowAttrib", w.w, uintptr(attrib), uintptr(value))
panicError()
}
func (w *Window) GetCursorPos() (x, y float64) {
glfwDLL.call("glfwGetCursorPos", w.w, uintptr(unsafe.Pointer(&x)), uintptr(unsafe.Pointer(&y)))
panicError()
return
}
func (w *Window) GetInputMode(mode InputMode) int {
r := glfwDLL.call("glfwGetInputMode", w.w, uintptr(mode))
panicError()
return int(r)
}
func (w *Window) GetKey(key Key) Action {
r := glfwDLL.call("glfwGetKey", w.w, uintptr(key))
panicError()
return Action(r)
}
func (w *Window) GetMonitor() *Monitor {
m := glfwDLL.call("glfwGetWindowMonitor", w.w)
panicError()
if m == 0 {
return nil
}
return &Monitor{m}
}
func (w *Window) GetMouseButton(button MouseButton) Action {
r := glfwDLL.call("glfwGetMouseButton", w.w, uintptr(button))
panicError()
return Action(r)
}
func (w *Window) GetPos() (int, int) {
var x, y int32
glfwDLL.call("glfwGetWindowPos", w.w, uintptr(unsafe.Pointer(&x)), uintptr(unsafe.Pointer(&y)))
panicError()
return int(x), int(y)
}
func (w *Window) GetSize() (int, int) {
var width, height int32
glfwDLL.call("glfwGetWindowSize", w.w, uintptr(unsafe.Pointer(&width)), uintptr(unsafe.Pointer(&height)))
panicError()
return int(width), int(height)
}
func (w *Window) Iconify() {
glfwDLL.call("glfwIconifyWindow", w.w)
panicError()
}
func (w *Window) MakeContextCurrent() {
glfwDLL.call("glfwMakeContextCurrent", w.w)
panicError()
}
func (w *Window) Maximize() {
glfwDLL.call("glfwMaximizeWindow", w.w)
panicError()
}
func (w *Window) Restore() {
glfwDLL.call("glfwRestoreWindow", w.w)
panicError()
}
func (w *Window) SetCharModsCallback(cbfun CharModsCallback) (previous CharModsCallback) {
glfwDLL.call("glfwSetCharModsCallback", w.w, uintptr(cbfun))
panicError()
return ToCharModsCallback(nil) // TODO
}
func (w *Window) SetCloseCallback(cbfun CloseCallback) (previous CloseCallback) {
glfwDLL.call("glfwSetWindowCloseCallback", w.w, uintptr(cbfun))
panicError()
return ToCloseCallback(nil) // TODO
}
func (w *Window) SetCursor(cursor *Cursor) {
var c uintptr
if cursor != nil {
c = cursor.c
}
glfwDLL.call("glfwSetCursor", w.w, c)
}
func (w *Window) SetFramebufferSizeCallback(cbfun FramebufferSizeCallback) (previous FramebufferSizeCallback) {
glfwDLL.call("glfwSetFramebufferSizeCallback", w.w, uintptr(cbfun))
panicError()
return ToFramebufferSizeCallback(nil) // TODO
}
func (w *Window) SetScrollCallback(cbfun ScrollCallback) (previous ScrollCallback) {
glfwDLL.call("glfwSetScrollCallback", w.w, uintptr(cbfun))
panicError()
return ToScrollCallback(nil) // TODO
}
func (w *Window) SetShouldClose(value bool) {
var v uintptr = False
if value {
v = True
}
glfwDLL.call("glfwSetWindowShouldClose", w.w, v)
panicError()
}
func (w *Window) SetSizeCallback(cbfun SizeCallback) (previous SizeCallback) {
glfwDLL.call("glfwSetWindowSizeCallback", w.w, uintptr(cbfun))
panicError()
prev := w.prevSizeCallback
w.prevSizeCallback = cbfun
return prev
}
func (w *Window) SetSizeLimits(minw, minh, maxw, maxh int) {
glfwDLL.call("glfwSetWindowSizeLimits", w.w, uintptr(minw), uintptr(minh), uintptr(maxw), uintptr(maxh))
panicError()
}
func (w *Window) SetIcon(images []image.Image) {
gimgs := make([]glfwImage, len(images))
defer runtime.KeepAlive(gimgs)
for i, img := range images {
b := img.Bounds()
m := image.NewNRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), img, b.Min, draw.Src)
gimgs[i].width = int32(b.Dx())
gimgs[i].height = int32(b.Dy())
gimgs[i].pixels = uintptr(unsafe.Pointer(&m.Pix[0]))
}
glfwDLL.call("glfwSetWindowIcon", w.w, uintptr(len(gimgs)), uintptr(unsafe.Pointer(&gimgs[0])))
panicError()
}
func (w *Window) SetInputMode(mode InputMode, value int) {
glfwDLL.call("glfwSetInputMode", w.w, uintptr(mode), uintptr(value))
panicError()
}
func (w *Window) SetMonitor(monitor *Monitor, xpos, ypos, width, height, refreshRate int) {
var m uintptr
if monitor != nil {
m = monitor.m
}
glfwDLL.call("glfwSetWindowMonitor", w.w, m, uintptr(xpos), uintptr(ypos), uintptr(width), uintptr(height), uintptr(refreshRate))
panicError()
}
func (w *Window) SetPos(xpos, ypos int) {
glfwDLL.call("glfwSetWindowPos", w.w, uintptr(xpos), uintptr(ypos))
panicError()
}
func (w *Window) SetSize(width, height int) {
glfwDLL.call("glfwSetWindowSize", w.w, uintptr(width), uintptr(height))
panicError()
}
func (w *Window) SetTitle(title string) {
s := []byte(title)
s = append(s, 0)
defer runtime.KeepAlive(s)
glfwDLL.call("glfwSetWindowTitle", w.w, uintptr(unsafe.Pointer(&s[0])))
panicError()
}
func (w *Window) ShouldClose() bool {
r := glfwDLL.call("glfwWindowShouldClose", w.w)
panicError()
return byte(r) == True
}
func (w *Window) Show() {
glfwDLL.call("glfwShowWindow", w.w)
panicError()
}
func (w *Window) SwapBuffers() {
glfwDLL.call("glfwSwapBuffers", w.w)
panicError()
}
func CreateWindow(width, height int, title string, monitor *Monitor, share *Window) (*Window, error) {
s := []byte(title)
s = append(s, 0)
defer runtime.KeepAlive(s)
var gm uintptr
if monitor != nil {
gm = monitor.m
}
var gw uintptr
if share != nil {
gw = share.w
}
w := glfwDLL.call("glfwCreateWindow", uintptr(width), uintptr(height), uintptr(unsafe.Pointer(&s[0])), gm, gw)
if w == 0 {
return nil, acceptError(APIUnavailable, VersionUnavailable)
}
return theGLFWWindows.add(w), nil
}
func (j Joystick) GetGUID() string {
ptr := glfwDLL.call("glfwGetJoystickGUID", uintptr(j))
panicError()
// ptr can be nil after disconnecting the joystick.
if ptr == 0 {
return ""
}
var backed [256]byte
as := backed[:0]
for i := int32(0); ; i++ {
b := *(*byte)(unsafe.Pointer(ptr))
ptr += unsafe.Sizeof(byte(0))
if b == 0 {
break
}
as = append(as, b)
}
r := string(as)
return r
}
func (j Joystick) GetName() string {
ptr := glfwDLL.call("glfwGetJoystickName", uintptr(j))
panicError()
// ptr can be nil after disconnecting the joystick.
if ptr == 0 {
return ""
}
var backed [256]byte
as := backed[:0]
for i := int32(0); ; i++ {
b := *(*byte)(unsafe.Pointer(ptr))
ptr += unsafe.Sizeof(byte(0))
if b == 0 {
break
}
as = append(as, b)
}
r := string(as)
return r
}
func (j Joystick) GetAxes() []float32 {
var l int32
ptr := glfwDLL.call("glfwGetJoystickAxes", uintptr(j), uintptr(unsafe.Pointer(&l)))
panicError()
// ptr can be nil after disconnecting the joystick.
if ptr == 0 {
return nil
}
as := make([]float32, l)
for i := int32(0); i < l; i++ {
as[i] = *(*float32)(unsafe.Pointer(ptr))
ptr += unsafe.Sizeof(float32(0))
}
return as
}
func (j Joystick) GetButtons() []byte {
var l int32
ptr := glfwDLL.call("glfwGetJoystickButtons", uintptr(j), uintptr(unsafe.Pointer(&l)))
panicError()
// ptr can be nil after disconnecting the joystick.
if ptr == 0 {
return nil
}
bs := make([]byte, l)
for i := int32(0); i < l; i++ {
bs[i] = *(*byte)(unsafe.Pointer(ptr))
ptr++
}
return bs
}
func (j Joystick) GetHats() []JoystickHatState {
var l int32
ptr := glfwDLL.call("glfwGetJoystickHats", uintptr(j), uintptr(unsafe.Pointer(&l)))
panicError()
// ptr can be nil after disconnecting the joystick.
if ptr == 0 {
return nil
}
hats := make([]JoystickHatState, l)
for i := int32(0); i < l; i++ {
hats[i] = *(*JoystickHatState)(unsafe.Pointer(ptr))
ptr++
}
return hats
}
func (j Joystick) GetGamepadState() *GamepadState {
var s struct {
Buttons [15]uint8
Axes [6]float32
}
r := glfwDLL.call("glfwGetGamepadState", uintptr(j), uintptr(unsafe.Pointer(&s)))
panicError()
if r != True {
return nil
}
state := &GamepadState{}
for i, b := range s.Buttons {
state.Buttons[i] = Action(b)
}
copy(state.Axes[:], s.Axes[:])
return state
}
func GetMonitors() []*Monitor {
var l int32
ptr := glfwDLL.call("glfwGetMonitors", uintptr(unsafe.Pointer(&l)))
panicError()
ms := make([]*Monitor, l)
for i := int32(0); i < l; i++ {
m := *(*unsafe.Pointer)(unsafe.Pointer(ptr))
if m != nil {
ms[i] = &Monitor{uintptr(m)}
}
ptr += bits.UintSize / 8
}
return ms
}
func GetPrimaryMonitor() *Monitor {
m := glfwDLL.call("glfwGetPrimaryMonitor")
panicError()
if m == 0 {
return nil
}
return &Monitor{m}
}
func Init() error {
glfwDLL.call("glfwInit")
// InvalidValue can happen when specific joysticks are used. This issue
// will be fixed in GLFW 3.3.5. As a temporary fix, ignore this error.
// See go-gl/glfw#292, go-gl/glfw#324, and glfw/glfw#1763
// (#1229).
err := acceptError(APIUnavailable, InvalidValue)
if e, ok := err.(*glfwError); ok && e.code == InvalidValue {
return nil
}
return err
}
func (j Joystick) Present() bool {
r := glfwDLL.call("glfwJoystickPresent", uintptr(j))
panicError()
return byte(r) == True
}
func panicErrorExceptForInvalidValue() {
// InvalidValue can happen when specific joysticks are used. This issue
// will be fixed in GLFW 3.3.5. As a temporary fix, ignore this error.
// See go-gl/glfw#292, go-gl/glfw#324, and glfw/glfw#1763
// (#1229).
err := acceptError(InvalidValue)
if e, ok := err.(*glfwError); ok && e.code == InvalidValue {
return
}
if err != nil {
panic(err)
}
}
func PollEvents() {
glfwDLL.call("glfwPollEvents")
// This should be used for WaitEvents and WaitEventsTimeout if needed.
panicErrorExceptForInvalidValue()
}
func PostEmptyEvent() {
glfwDLL.call("glfwPostEmptyEvent")
panicError()
}
func SetMonitorCallback(cbfun func(monitor *Monitor, event PeripheralEvent)) {
var gcb uintptr
if cbfun != nil {
gcb = windows.NewCallbackCDecl(func(monitor uintptr, event PeripheralEvent) uintptr {
var m *Monitor
if monitor != 0 {
m = &Monitor{monitor}
}
cbfun(m, event)
return 0
})
}
glfwDLL.call("glfwSetMonitorCallback", gcb)
panicError()
}
func SwapInterval(interval int) {
glfwDLL.call("glfwSwapInterval", uintptr(interval))
panicError()
}
func Terminate() {
flushErrors()
glfwDLL.call("glfwTerminate")
if err := glfwDLL.unload(); err != nil {
panic(err)
}
}
func UpdateGamepadMappings(mapping string) bool {
m := append([]byte(mapping), 0)
defer runtime.KeepAlive(m)
r := glfwDLL.call("glfwUpdateGamepadMappings", uintptr(unsafe.Pointer(&m[0])))
panicError()
return byte(r) == True
}
func WaitEvents() {
glfwDLL.call("glfwWaitEvents")
panicError()
}
func WindowHint(target Hint, hint int) {
glfwDLL.call("glfwWindowHint", uintptr(target), uintptr(hint))
panicError()
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
iot-web-platform/wsgi.py
|
"""
WSGI config for auth_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'auth_test.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
services/dataworks-public/create_meta_category.go
|
package dataworks_public
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// CreateMetaCategory invokes the dataworks_public.CreateMetaCategory API synchronously
func (client *Client) CreateMetaCategory(request *CreateMetaCategoryRequest) (response *CreateMetaCategoryResponse, err error) {
response = CreateCreateMetaCategoryResponse()
err = client.DoAction(request, response)
return
}
// CreateMetaCategoryWithChan invokes the dataworks_public.CreateMetaCategory API asynchronously
func (client *Client) CreateMetaCategoryWithChan(request *CreateMetaCategoryRequest) (<-chan *CreateMetaCategoryResponse, <-chan error) {
responseChan := make(chan *CreateMetaCategoryResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.CreateMetaCategory(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// CreateMetaCategoryWithCallback invokes the dataworks_public.CreateMetaCategory API asynchronously
func (client *Client) CreateMetaCategoryWithCallback(request *CreateMetaCategoryRequest, callback func(response *CreateMetaCategoryResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *CreateMetaCategoryResponse
var err error
defer close(result)
response, err = client.CreateMetaCategory(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// CreateMetaCategoryRequest is the request struct for api CreateMetaCategory
type CreateMetaCategoryRequest struct {
*requests.RpcRequest
Name string `position:"Body" name:"Name"`
Comment string `position:"Body" name:"Comment"`
ParentId requests.Integer `position:"Body" name:"ParentId"`
}
// CreateMetaCategoryResponse is the response struct for api CreateMetaCategory
type CreateMetaCategoryResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
ErrorCode string `json:"ErrorCode" xml:"ErrorCode"`
ErrorMessage string `json:"ErrorMessage" xml:"ErrorMessage"`
HttpStatusCode int `json:"HttpStatusCode" xml:"HttpStatusCode"`
Success bool `json:"Success" xml:"Success"`
Data Data `json:"Data" xml:"Data"`
}
// CreateCreateMetaCategoryRequest creates a request to invoke CreateMetaCategory API
func CreateCreateMetaCategoryRequest() (request *CreateMetaCategoryRequest) {
request = &CreateMetaCategoryRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("dataworks-public", "2020-05-18", "CreateMetaCategory", "", "")
request.Method = requests.POST
return
}
// CreateCreateMetaCategoryResponse creates a response to parse from CreateMetaCategory response
func CreateCreateMetaCategoryResponse() (response *CreateMetaCategoryResponse) {
response = &CreateMetaCategoryResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
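// Usage sketch (not part of the generated SDK): assuming a *Client has already been
// created elsewhere (for example via the SDK's NewClientWithAccessKey helper), creating
// a category looks roughly like this:
//
// request := CreateCreateMetaCategoryRequest()
// request.Name = "my-category"
// response, err := client.CreateMetaCategory(request)
// if err != nil {
//     // handle the error
// }
// fmt.Println(response.RequestId)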
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
storage/default.go
|
package storage
import (
"context"
"fmt"
"github.com/joho/godotenv"
"gitlab.com/gitlab-red-team/cve-2021-22205-hash-harvester/tags"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"os"
"strings"
"sync"
)
var mongoOnce sync.Once
var clientInstance *mongo.Client
var clientInstanceError error
var mongoURI string
const (
DbName = "cve_2021_22205"
CollectionName = "tags"
)
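// getMongoClient lazily creates a single shared MongoDB client using sync.Once;
// any connection or ping error is kept in clientInstanceError and returned to callers.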
func getMongoClient() (*mongo.Client, error) {
mongoOnce.Do(func() {
mongoURI = getMongodbURI()
clientOptions := options.Client().ApplyURI(mongoURI)
client, err := mongo.Connect(context.TODO(), clientOptions)
if err != nil {
clientInstanceError = err
}
err = client.Ping(context.TODO(), nil)
if err != nil {
clientInstanceError = err
}
clientInstance = client
})
return clientInstance, clientInstanceError
}
func getMongodbURI() string {
if err := godotenv.Load(); err != nil {
panic("[!] No .env file found!")
}
return os.Getenv("MONGODB_URI")
}
func Save(tag tags.Tag) {
storageErr := createTag(tag)
if storageErr != nil {
panic(storageErr)
}
fmt.Printf("[*] Tag %s with hash %s stored successfully.\n", tag.Name, tag.Hash)
}
func TagExists(tag tags.Tag) bool {
retrievedTag, err := getTag(tag)
if err != nil {
if !strings.Contains(err.Error(), "no documents in result") {
panic(err)
}
}
if retrievedTag.Hash == "" {
return false
}
fmt.Printf("[*] Tag %s with hash %s already stored. Moving on.\n", retrievedTag.Name, retrievedTag.Hash)
return true
}
func getTag(tag tags.Tag) (tags.Tag, error) {
result := tags.Tag{}
filter := bson.M{"name": tag.Name}
client, err := getMongoClient()
if err != nil {
return result, err
}
collection := client.Database(DbName).Collection(CollectionName)
err = collection.FindOne(context.TODO(), filter).Decode(&result)
if err != nil {
return result, err
}
return result, nil
}
func createTag(tag tags.Tag) error {
client, err := getMongoClient()
if err != nil {
return err
}
collection := client.Database(DbName).Collection(CollectionName)
_, err = collection.InsertOne(context.TODO(), tag)
if err != nil {
return err
}
return nil
}
func GetDistinctHashes() ([]interface{}, error) {
var result []interface{}
client, err := getMongoClient()
if err != nil {
return result, err
}
collection := client.Database(DbName).Collection(CollectionName)
return collection.Distinct(context.TODO(), "hash", bson.D{{}})
}
|
[
"\"MONGODB_URI\""
] |
[] |
[
"MONGODB_URI"
] |
[]
|
["MONGODB_URI"]
|
go
| 1 | 0 | |
examples/txnkv/pessimistic_txn/pessimistic_txn.go
|
// Copyright 2021 TiKV Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"os"
"time"
"github.com/tikv/client-go/v2/kv"
"github.com/tikv/client-go/v2/tikv"
)
// KV represents a Key-Value pair.
type KV struct {
K, V []byte
}
func (kv KV) String() string {
return fmt.Sprintf("%s => %s (%v)", kv.K, kv.V, kv.V)
}
var (
client *tikv.KVStore
pdAddr = flag.String("pd", "127.0.0.1:2379", "pd address")
)
// Init initializes information.
func initStore() {
var err error
client, err = tikv.NewTxnClient([]string{*pdAddr})
if err != nil {
panic(err)
}
}
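// begin_pessimistic_txn starts a new transaction and marks it as pessimistic so that
// LockKeys acquires locks on the involved keys eagerly.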
func begin_pessimistic_txn() (txn *tikv.KVTxn) {
txn, err := client.Begin()
if err != nil {
panic(err)
}
txn.SetPessimistic(true)
return txn
}
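// exampleForPessimisticTXN walks through lock conflicts between two pessimistic
// transactions: txn1 locks k1 and k2, txn2 fails to lock k2 (no-wait and timed wait),
// and after txn1 commits, txn2 retries with a refreshed ForUpdateTS and succeeds.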
func exampleForPessimisticTXN() {
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := begin_pessimistic_txn()
// txn1: lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
if err != nil {
panic(err)
}
fmt.Println("txn1: lock k1 success.")
// txn1: lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
if err != nil {
panic(err)
}
fmt.Println("txn1: lock k2 success.")
// begin txn2
txn2 := begin_pessimistic_txn()
// txn2: lock k2 no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire the lock immediately, so this fails with ErrLockAcquireFailAndNoWaitSet
fmt.Println("txn2: acquire lock for k2 (while txn1 has this lock) should fail with error: ", err)
// txn2: lock k2 waiting a limited time (200ms), less than the TTL of txn1's lock on k2, so it should fail with a timeout.
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// txn2: cannot acquire the lock on k2 in time, so it should fail with a timeout.
fmt.Println("txn2: acquire lock for k2 (while txn1 has this lock) should fail with error: ", err)
// committing txn1 should succeed.
txn1.Set(k1, k1)
err = txn1.Commit(context.Background())
if err != nil {
panic(err)
} else {
fmt.Println("tnx1: commit txn1 success!")
}
// txn2: try to lock k2 with no wait and the old ForUpdateTS; this should fail.
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire the lock; this should hit a write conflict.
fmt.Println("txn2: acquire lock for k2 should fail with error (conflict): ", err)
lockCtx.ForUpdateTS, err = tikv.ExtractStartTS(client, tikv.DefaultStartTSOption())
fmt.Println("txn2: get current start ts as forupdate ts should success:", err)
// txn2: lock k2 in txn2 with new forUpdateTS should success.
err = txn2.LockKeys(context.Background(), lockCtx, k2)
if err != nil {
// cannot acquire lock , should success.
fmt.Println("txn2: acquire lock for k2 with new forUpdateTS should be success while meet err:", err)
} else {
fmt.Println("txn2: acquire lock for k2 with new forUpdateTS success!")
}
// txn2: do some write.
txn2.Set(k1, k1)
txn2.Set(k2, k2)
txn2.Delete(k1)
txn2.Delete(k2)
// committing txn2 should succeed.
err = txn2.Commit(context.Background())
if err != nil {
fmt.Println("txn2: commit should success while meet err ", err)
} else {
fmt.Println("txn2: commit success.")
}
}
func main() {
pdAddr := os.Getenv("PD_ADDR")
if pdAddr != "" {
os.Args = append(os.Args, "-pd", pdAddr)
}
flag.Parse()
initStore()
exampleForPessimisticTXN()
}
|
[
"\"PD_ADDR\""
] |
[] |
[
"PD_ADDR"
] |
[]
|
["PD_ADDR"]
|
go
| 1 | 0 | |
jenkins-build-driver/src/test/java/org/jboss/pnc/jenkinsbuilddriver/test/JenkinsDriverRemoteTest.java
|
/**
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.pnc.jenkinsbuilddriver.test;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.pnc.common.Configuration;
import org.jboss.pnc.common.util.ObjectWrapper;
import org.jboss.pnc.jenkinsbuilddriver.JenkinsBuildDriver;
import org.jboss.pnc.model.Artifact;
import org.jboss.pnc.model.BuildConfigurationAudited;
import org.jboss.pnc.model.Project;
import org.jboss.pnc.model.RepositoryType;
import org.jboss.pnc.spi.BuildExecution;
import org.jboss.pnc.spi.builddriver.BuildDriverResult;
import org.jboss.pnc.spi.builddriver.BuildDriverStatus;
import org.jboss.pnc.spi.builddriver.CompletedBuild;
import org.jboss.pnc.spi.builddriver.RunningBuild;
import org.jboss.pnc.spi.builddriver.exception.BuildDriverException;
import org.jboss.pnc.spi.environment.RunningEnvironment;
import org.jboss.pnc.spi.environment.exception.EnvironmentDriverException;
import org.jboss.pnc.spi.repositorymanager.RepositoryManagerException;
import org.jboss.pnc.spi.repositorymanager.RepositoryManagerResult;
import org.jboss.pnc.spi.repositorymanager.model.RepositoryConnectionInfo;
import org.jboss.pnc.spi.repositorymanager.model.RepositorySession;
import org.jboss.pnc.test.category.RemoteTest;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import javax.inject.Inject;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.logging.Logger;
import static org.mockito.Mockito.mock;
/**
* Created by <a href="mailto:[email protected]">Matej Lazar</a> on 2014-11-23.
*/
@RunWith(Arquillian.class)
@Category(RemoteTest.class)
@Ignore("Needs to connect to non existing Jenkins server")
public class JenkinsDriverRemoteTest {
private static final Logger log = Logger.getLogger(JenkinsDriverRemoteTest.class.getName());
@Deployment
public static JavaArchive createDeployment() {
JavaArchive jar = ShrinkWrap.create(JavaArchive.class).addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml")
.addAsResource("META-INF/logging.properties").addAsResource("freeform-job-template.xml")
.addPackages(true, org.apache.http.client.HttpResponseException.class.getPackage())
.addPackages(true, Configuration.class.getPackage())
.addPackage(JenkinsBuildDriver.class.getPackage());
System.out.println(jar.toString(true));
return jar;
}
@Inject
JenkinsBuildDriver jenkinsBuildDriver;
@Test
//@Ignore("To be fixed by NCL-554")
public void startJenkinsJobTestCase() throws Exception {
BuildConfigurationAudited pbc = getBuildConfiguration();
RunningEnvironment runningEnvironment = getRunningEnvironment();
final Semaphore mutex = new Semaphore(1);
ObjectWrapper<Boolean> completed = new ObjectWrapper<>(false);
ObjectWrapper<BuildDriverResult> resultWrapper = new ObjectWrapper<>();
ObjectWrapper<Long> buildStarted = new ObjectWrapper<>();
ObjectWrapper<Long> buildTook = new ObjectWrapper<>();
class BuildTask {
CompletedBuild buildJobDetails;
}
final BuildTask buildTask = new BuildTask();
Consumer<CompletedBuild> onComplete = (completedBuild) -> {
buildTask.buildJobDetails = completedBuild;
completed.set(true);
buildTook.set(System.currentTimeMillis() - buildStarted.get());
log.info("Received build completed in " + buildTook.get() + "ms.");
try {
resultWrapper.set(completedBuild.getBuildResult());
} catch (BuildDriverException e) {
throw new AssertionError("Cannot get build result.", e);
}
mutex.release();
};
Consumer<Throwable> onError = (e) -> {
throw new AssertionError(e);
};
mutex.acquire();
RunningBuild runningBuild = jenkinsBuildDriver.startProjectBuild(mock(BuildExecution.class), pbc, runningEnvironment);
buildStarted.set(System.currentTimeMillis());
runningBuild.monitor(onComplete, onError);
mutex.tryAcquire(60, TimeUnit.SECONDS); // wait for callback to release
Assert.assertTrue("There was no complete callback.", completed.get());
Assert.assertNotNull(buildTask.buildJobDetails);
long minBuildTime = 5000;
Assert.assertTrue("Received build completed in " + buildTook.get() + " while expected >" + minBuildTime + ".",
buildTook.get() >= minBuildTime);
BuildDriverResult buildDriverResult = resultWrapper.get();
Assert.assertEquals(BuildDriverStatus.SUCCESS, buildDriverResult.getBuildDriverStatus());
Assert.assertTrue("Incomplete build log.", buildDriverResult.getBuildLog().contains("Building in workspace"));
Assert.assertTrue("Incomplete build log.", buildDriverResult.getBuildLog().contains("Finished: SUCCESS"));
Assert.assertTrue("There was no complete callback.", completed.get());
}
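// Builds a stub RunningEnvironment pointing at the Jenkins instance configured via the
// PNC_JENKINS_URL and PNC_JENKINS_PORT environment variables.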
private RunningEnvironment getRunningEnvironment() {
final RepositorySession repositoryConfiguration = getRepositoryConfiguration();
return new RunningEnvironment() {
@Override
public RepositorySession getRepositorySession() {
return repositoryConfiguration;
}
@Override
public Path getWorkingDirectory() {
try {
Path tempDirectory = Files.createTempDirectory("JenkinsDriverRemoteTest");
tempDirectory.toFile().deleteOnExit();
return tempDirectory;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public String getJenkinsUrl() {
return System.getenv("PNC_JENKINS_URL") + ":" + getJenkinsPort();
}
@Override
public String getInternalBuildAgentUrl() {
return getJenkinsUrl();
}
@Override
public int getJenkinsPort() {
return Integer.parseInt(System.getenv("PNC_JENKINS_PORT"));
}
@Override
public String getId() {
return null;
}
@Override
public void destroyEnvironment() throws EnvironmentDriverException {
}
};
}
private RepositorySession getRepositoryConfiguration() {
return new RepositorySession() {
@Override
public RepositoryType getType() {
return RepositoryType.MAVEN;
}
@Override
public String getBuildRepositoryId() {
return "mock-config";
}
@Override
public RepositoryConnectionInfo getConnectionInfo() {
return new RepositoryConnectionInfo() {
@Override
public String getDependencyUrl() {
return "https://repository.jboss.org/nexus/content/repositories/central";
}
@Override
public String getToolchainUrl() {
return null;
}
@Override
public String getDeployUrl() {
return null;
}
@Override
public Map<String, String> getProperties() {
return null;
}
};
}
@Override
public RepositoryManagerResult extractBuildArtifacts() throws RepositoryManagerException {
return new RepositoryManagerResult() {
@Override
public List<Artifact> getBuiltArtifacts() {
List<Artifact> builtArtifacts = Collections.emptyList();
builtArtifacts.add(getArtifact(1));
return builtArtifacts;
}
@Override
public List<Artifact> getDependencies() {
List<Artifact> dependencies = Collections.emptyList();
dependencies.add(getArtifact(10));
return dependencies;
}
};
}
private Artifact getArtifact(int i) {
Artifact artifact = new Artifact();
artifact.setId(i);
artifact.setIdentifier("test" + i);
return artifact;
}
};
}
private BuildConfigurationAudited getBuildConfiguration() {
BuildConfigurationAudited pbc = new BuildConfigurationAudited();
pbc.setScmRepoURL("https://github.com/project-ncl/pnc.git");
pbc.setScmRevision("*/master"); // this is default
pbc.setBuildScript("mvn validate");
pbc.setName("PNC-executed-from-jenkins-driver-test");
Project project = new Project();
project.setName("PNC-executed-from-jenkins-driver-test");
pbc.setProject(project);
return pbc;
}
}
|
[
"\"PNC_JENKINS_URL\"",
"\"PNC_JENKINS_PORT\""
] |
[] |
[
"PNC_JENKINS_URL",
"PNC_JENKINS_PORT"
] |
[]
|
["PNC_JENKINS_URL", "PNC_JENKINS_PORT"]
|
java
| 2 | 0 | |
examples/batch_history.py
|
import os
import blockchyp
# initialize a client.
client = blockchyp.Client(
api_key=os.environ["BC_API_KEY"],
bearer_token=os.environ["BC_BEARER_TOKEN"],
signing_key=os.environ["BC_SIGNING_KEY"],
)
# populate request parameters.
request = {
"maxResults": 250,
"startIndex": 1,
}
# run the transaction.
response = client.batch_history(request)
print("Response: %r" % response)
|
[] |
[] |
[
"BC_BEARER_TOKEN",
"BC_API_KEY",
"BC_SIGNING_KEY"
] |
[]
|
["BC_BEARER_TOKEN", "BC_API_KEY", "BC_SIGNING_KEY"]
|
python
| 3 | 0 | |
vta/scripts/tune_conv2d_transpose.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuning a single conv2d transpose operator"""
from collections import namedtuple
import logging
import os
import tvm
from tvm import autotvm
import topi
import vta
import vta.testing
# Get batch info from env
env = vta.get_env()
Workload = namedtuple("Conv2DTransposeWorkload",
['batch', 'height', 'width', 'in_filter', 'out_filter',
'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])
dcgan_wkls = [
# dcgan
('DCGAN.CT1', Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2)),
('DCGAN.CT2', Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2)),
('DCGAN.CT3', Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2)),
]
@tvm.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.const(a_min, x.dtype)
const_max = tvm.const(a_max, x.dtype)
x = tvm.compute(x.shape, lambda *i: tvm.min(x(*i), const_max), name="clipA")
x = tvm.compute(x.shape, lambda *i: tvm.max(x(*i), const_min), name="clipB")
return x
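# conv2d_transpose builds the AutoTVM tuning template: it declares data/kernel
# placeholders in VTA's blocked layout, applies the transposed convolution plus
# shift/clip/cast, and returns the schedule together with the argument tensors.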
def conv2d_transpose(N, CI, H, W, CO, KH, KW, strides, padding):
data_shape = (N//env.BATCH, CI//env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
kernel_shape = (CO//env.BLOCK_OUT, CI//env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
data = tvm.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = tvm.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
with tvm.target.vta():
res = topi.nn.conv2d_transpose_nchw(
Input=data,
Filter=kernel,
strides=strides,
padding=padding,
out_dtype=env.acc_dtype)
res = topi.right_shift(res, env.WGT_WIDTH)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
if tvm.target.current_target().device_name == 'vta':
s = topi.generic.schedule_conv2d_transpose_nchw([res])
else:
s = tvm.create_schedule([res.op])
return s, [data, kernel, res]
if __name__ == '__main__':
# Logging config (for printing tuning log to the screen)
logging.basicConfig()
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
# Tuning log files
log_file = "%s.conv2d_transpose.log" % (env.TARGET)
# create tmp log file
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
# Get tracker info from env
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(dcgan_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(dcgan_wkls))
# Read in workload parameters
N = wl.batch
H = wl.height
W = wl.width
CI = wl.in_filter
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
# Create task
task = autotvm.task.create(
conv2d_transpose,
args=(N, CI, H, W, CO, KH, KW, strides, padding),
target=tvm.target.vta(),
target_host=env.target_host,
template_key='direct')
print(task.config_space)
# Tune
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET, host=tracker_host, port=int(tracker_port),
number=5, timeout=60,
check_correctness=True))
# Run Tuner
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file)])
# Pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
|
[] |
[] |
[
"TVM_TRACKER_HOST",
"TVM_TRACKER_PORT"
] |
[]
|
["TVM_TRACKER_HOST", "TVM_TRACKER_PORT"]
|
python
| 2 | 0 | |
cmd/reviewdog/doghouse.go
|
package main
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"sort"
"strings"
"github.com/reviewdog/reviewdog"
"github.com/reviewdog/reviewdog/cienv"
"github.com/reviewdog/reviewdog/doghouse"
"github.com/reviewdog/reviewdog/doghouse/client"
"github.com/reviewdog/reviewdog/project"
"golang.org/x/oauth2"
"golang.org/x/sync/errgroup"
)
func runDoghouse(ctx context.Context, r io.Reader, w io.Writer, opt *option, isProject bool, allowNonPR bool) error {
ghInfo, isPr, err := cienv.GetBuildInfo()
if err != nil {
return err
}
if !isPr && !allowNonPR {
fmt.Fprintln(os.Stderr, "reviewdog: this is not PullRequest build.")
return nil
}
resultSet, err := checkResultSet(ctx, r, opt, isProject)
if err != nil {
return err
}
cli, err := newDoghouseCli(ctx)
if err != nil {
return err
}
filteredResultSet, err := postResultSet(ctx, resultSet, ghInfo, cli)
if err != nil {
return err
}
if foundResultInDiff := reportResults(w, filteredResultSet); foundResultInDiff {
return errors.New("found at least one result in diff")
}
return nil
}
func newDoghouseCli(ctx context.Context) (client.DogHouseClientInterface, error) {
// If skipDoghouseServer is true, run doghouse code directly instead of talking to
// the doghouse server, because the provided GitHub API token has the Check API scope.
skipDoghouseServer := cienv.IsInGitHubAction() && os.Getenv("REVIEWDOG_TOKEN") == ""
if skipDoghouseServer {
token, err := nonEmptyEnv("REVIEWDOG_GITHUB_API_TOKEN")
if err != nil {
return nil, err
}
ghcli, err := githubClient(ctx, token)
if err != nil {
return nil, err
}
return &client.GitHubClient{Client: ghcli}, nil
}
return newDoghouseServerCli(ctx), nil
}
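// newDoghouseServerCli builds a client that talks to the doghouse server, attaching
// REVIEWDOG_TOKEN as an OAuth2 bearer token when it is set.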
func newDoghouseServerCli(ctx context.Context) *client.DogHouseClient {
httpCli := http.DefaultClient
if token := os.Getenv("REVIEWDOG_TOKEN"); token != "" {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
httpCli = oauth2.NewClient(ctx, ts)
}
return client.New(httpCli)
}
var projectRunAndParse = project.RunAndParse
func checkResultSet(ctx context.Context, r io.Reader, opt *option, isProject bool) (*reviewdog.ResultMap, error) {
resultSet := new(reviewdog.ResultMap)
if isProject {
conf, err := projectConfig(opt.conf)
if err != nil {
return nil, err
}
resultSet, err = projectRunAndParse(ctx, conf, buildRunnersMap(opt.runners), opt.level)
if err != nil {
return nil, err
}
} else {
p, err := newParserFromOpt(opt)
if err != nil {
return nil, err
}
rs, err := p.Parse(r)
if err != nil {
return nil, err
}
resultSet.Store(toolName(opt), &reviewdog.Result{
Level: opt.level,
CheckResults: rs,
})
}
return resultSet, nil
}
func postResultSet(ctx context.Context, resultSet *reviewdog.ResultMap, ghInfo *cienv.BuildInfo, cli client.DogHouseClientInterface) (*reviewdog.FilteredCheckMap, error) {
var g errgroup.Group
wd, _ := os.Getwd()
filteredResultSet := new(reviewdog.FilteredCheckMap)
resultSet.Range(func(name string, result *reviewdog.Result) {
checkResults := result.CheckResults
as := make([]*doghouse.Annotation, 0, len(checkResults))
for _, r := range checkResults {
as = append(as, checkResultToAnnotation(r, wd))
}
req := &doghouse.CheckRequest{
Name: name,
Owner: ghInfo.Owner,
Repo: ghInfo.Repo,
PullRequest: ghInfo.PullRequest,
SHA: ghInfo.SHA,
Branch: ghInfo.Branch,
Annotations: as,
Level: result.Level,
}
g.Go(func() error {
res, err := cli.Check(ctx, req)
if err != nil {
return fmt.Errorf("post failed for %s: %v", name, err)
}
if res.ReportURL != "" {
log.Printf("[%s] reported: %s", name, res.ReportURL)
}
if res.CheckedResults != nil {
filteredResultSet.Store(name, res.CheckedResults)
}
if res.ReportURL == "" && res.CheckedResults == nil {
return fmt.Errorf("No result found for %q", name)
}
return nil
})
})
return filteredResultSet, g.Wait()
}
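// checkResultToAnnotation converts a reviewdog check result into a doghouse annotation,
// normalizing the file path relative to the current working directory.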
func checkResultToAnnotation(c *reviewdog.CheckResult, wd string) *doghouse.Annotation {
return &doghouse.Annotation{
Path: reviewdog.CleanPath(c.Path, wd),
Line: c.Lnum,
Message: c.Message,
RawMessage: strings.Join(c.Lines, "\n"),
}
}
// reportResults reports results to given io.Writer and return true if at least
// one annotation result is in diff.
func reportResults(w io.Writer, filteredResultSet *reviewdog.FilteredCheckMap) bool {
// Sort names to get deterministic result.
var names []string
filteredResultSet.Range(func(name string, results []*reviewdog.FilteredCheck) {
names = append(names, name)
})
sort.Strings(names)
foundInDiff := false
for _, name := range names {
results, err := filteredResultSet.Load(name)
if err != nil {
// Should not happen.
log.Printf("reviewdog: result not found for %q", name)
continue
}
fmt.Fprintf(w, "reviwedog: Reporting results for %q\n", name)
foundResultPerName := false
filteredNum := 0
for _, result := range results {
if !result.InDiff {
filteredNum++
continue
}
foundInDiff = true
foundResultPerName = true
// Output original lines.
for _, line := range result.Lines {
fmt.Fprintln(w, line)
}
}
if !foundResultPerName {
fmt.Fprintf(w, "reviwedog: No results found for %q. %d results found outside diff.\n", name, filteredNum)
}
}
return foundInDiff
}
|
[
"\"REVIEWDOG_TOKEN\"",
"\"REVIEWDOG_TOKEN\""
] |
[] |
[
"REVIEWDOG_TOKEN"
] |
[]
|
["REVIEWDOG_TOKEN"]
|
go
| 1 | 0 | |
docker/x86_64/app/client_component_publish.py
|
from autobahn.twisted.component import Component, run
from autobahn.twisted.util import sleep
from twisted.internet.defer import inlineCallbacks
import os
import argparse
import six
url = os.environ.get('CBURL', u'ws://localhost:8080/ws')
realmv = os.environ.get('CBREALM', u'realm1')
print(url, realmv)
component = Component(transports=url, realm=realmv)
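# joined() fires once the WAMP session is established and then publishes an
# incrementing "Hello World" message to com.myapp.hello once per second.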
@component.on_join
@inlineCallbacks
def joined(session, details):
print("session ready")
counter = 0
while True:
# publish() only returns a Deferred if we asked for an acknowledgement
session.publish(u'com.myapp.hello', "Hello World {0}".format(counter))
counter += 1
yield sleep(1)
if __name__ == "__main__":
run([component])
|
[] |
[] |
[
"CBREALM",
"CBURL"
] |
[]
|
["CBREALM", "CBURL"]
|
python
| 2 | 0 | |
pkg/process/config/config.go
|
package config
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
model "github.com/DataDog/agent-payload/process"
"github.com/DataDog/datadog-agent/cmd/agent/api/pb"
"github.com/DataDog/datadog-agent/pkg/config"
oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config"
"github.com/DataDog/datadog-agent/pkg/process/util"
apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config"
"github.com/DataDog/datadog-agent/pkg/util/fargate"
ddgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc"
"github.com/DataDog/datadog-agent/pkg/util/log"
"google.golang.org/grpc"
)
const (
// defaultProxyPort is the default port used for proxies.
// This mirrors the configuration for the infrastructure agent.
defaultProxyPort = 3128
// defaultSystemProbeBPFDir is the default path for eBPF programs
defaultSystemProbeBPFDir = "/opt/datadog-agent/embedded/share/system-probe/ebpf"
// defaultRuntimeCompilerOutputDir is the default path for output from the system-probe runtime compiler
defaultRuntimeCompilerOutputDir = "/var/tmp/datadog-agent/system-probe/build"
defaultGRPCConnectionTimeout = 60 * time.Second
)
// Name for check performed by process-agent or system-probe
const (
ProcessCheckName = "process"
RTProcessCheckName = "rtprocess"
ContainerCheckName = "container"
RTContainerCheckName = "rtcontainer"
ConnectionsCheckName = "connections"
PodCheckName = "pod"
NetworkCheckName = "Network"
OOMKillCheckName = "OOM Kill"
TCPQueueLengthCheckName = "TCP queue length"
)
var (
processChecks = []string{ProcessCheckName, RTProcessCheckName}
containerChecks = []string{ContainerCheckName, RTContainerCheckName}
)
type proxyFunc func(*http.Request) (*url.URL, error)
type cmdFunc = func(name string, arg ...string) *exec.Cmd
// WindowsConfig stores all windows-specific configuration for the process-agent and system-probe.
type WindowsConfig struct {
// Number of checks runs between refreshes of command-line arguments
ArgsRefreshInterval int
// Controls getting process arguments immediately when a new process is discovered
AddNewArgs bool
//System Probe Configuration
// EnableMonotonicCount determines if we will calculate send/recv bytes of connections with headers and retransmits
EnableMonotonicCount bool
// DriverBufferSize (bytes) determines the size of the buffer we pass to the driver when reading flows
DriverBufferSize int
}
// AgentConfig is the global config for the process-agent. This information
// is sourced from config files and the environment variables.
type AgentConfig struct {
Enabled bool
HostName string
APIEndpoints []apicfg.Endpoint
LogFile string
LogLevel string
LogToConsole bool
QueueSize int // The number of items allowed in each delivery queue.
ProcessQueueBytes int // The total number of bytes that can be enqueued for delivery to the process intake endpoint
Blacklist []*regexp.Regexp
Scrubber *DataScrubber
MaxPerMessage int
MaxConnsPerMessage int
AllowRealTime bool
Transport *http.Transport `json:"-"`
DDAgentBin string
StatsdHost string
StatsdPort int
ProcessExpVarPort int
ProfilingEnabled bool
ProfilingSite string
ProfilingURL string
ProfilingAPIKey string
ProfilingEnvironment string
// host type of the agent, used to populate container payload with additional host information
ContainerHostType model.ContainerHostType
// System probe collection configuration
EnableSystemProbe bool
DisableTCPTracing bool
DisableUDPTracing bool
DisableIPv6Tracing bool
DisableDNSInspection bool
CollectLocalDNS bool
EnableHTTPMonitoring bool
SystemProbeAddress string
SystemProbeLogFile string
SystemProbeBPFDir string
MaxTrackedConnections uint
SysProbeBPFDebug bool
ExcludedBPFLinuxVersions []string
ExcludedSourceConnections map[string][]string
ExcludedDestinationConnections map[string][]string
EnableConntrack bool
ConntrackMaxStateSize int
ConntrackRateLimit int
IgnoreConntrackInitFailure bool
EnableConntrackAllNamespaces bool
SystemProbeDebugPort int
ClosedChannelSize int
MaxClosedConnectionsBuffered int
MaxConnectionsStateBuffered int
OffsetGuessThreshold uint64
EnableTracepoints bool
EnableRuntimeCompiler bool
KernelHeadersDirs []string
RuntimeCompilerOutputDir string
EnableGatewayLookup bool
// Orchestrator config
Orchestrator *oconfig.OrchestratorConfig
// DNS stats configuration
CollectDNSStats bool
DNSTimeout time.Duration
CollectDNSDomains bool
MaxDNSStats int
// Check config
EnabledChecks []string
CheckIntervals map[string]time.Duration
// Internal store of a proxy used for generating the Transport
proxy proxyFunc
// Windows-specific config
Windows WindowsConfig
grpcConnectionTimeout time.Duration
}
// CheckIsEnabled returns a bool indicating if the given check name is enabled.
func (a AgentConfig) CheckIsEnabled(checkName string) bool {
return util.StringInSlice(a.EnabledChecks, checkName)
}
// CheckInterval returns the interval for the given check name, defaulting to 10s if not found.
func (a AgentConfig) CheckInterval(checkName string) time.Duration {
d, ok := a.CheckIntervals[checkName]
if !ok {
log.Errorf("missing check interval for '%s', you must set a default", checkName)
d = 10 * time.Second
}
return d
}
const (
defaultProcessEndpoint = "https://process.datadoghq.com"
maxMessageBatch = 100
maxConnsMessageBatch = 1000
defaultMaxTrackedConnections = 65536
maxOffsetThreshold = 3000
)
// NewDefaultTransport provides a http transport configuration with sane default timeouts
func NewDefaultTransport() *http.Transport {
return &http.Transport{
MaxIdleConns: 5,
IdleConnTimeout: 90 * time.Second,
Dial: (&net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 10 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
ResponseHeaderTimeout: 5 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
// NewDefaultAgentConfig returns an AgentConfig with defaults initialized
func NewDefaultAgentConfig(canAccessContainers bool) *AgentConfig {
processEndpoint, err := url.Parse(defaultProcessEndpoint)
if err != nil {
// This is a hardcoded URL so parsing it should not fail
panic(err)
}
var enabledChecks []string
if canAccessContainers {
enabledChecks = containerChecks
}
ac := &AgentConfig{
Enabled: canAccessContainers, // We'll always run inside of a container.
APIEndpoints: []apicfg.Endpoint{{Endpoint: processEndpoint}},
LogFile: defaultLogFilePath,
LogLevel: "info",
LogToConsole: false,
// Allow buffering up to 60 megabytes of payload data in total
ProcessQueueBytes: 60 * 1000 * 1000,
// This can be fairly high as the input should get throttled by queue bytes first.
// Assuming we generate ~8 checks/minute (for process/network), this should allow buffering of ~30 minutes of data assuming it fits within the queue bytes memory budget
QueueSize: 256,
MaxPerMessage: 100,
MaxConnsPerMessage: 600,
AllowRealTime: true,
HostName: "",
Transport: NewDefaultTransport(),
ProcessExpVarPort: 6062,
ContainerHostType: model.ContainerHostType_notSpecified,
// Statsd for internal instrumentation
StatsdHost: "127.0.0.1",
StatsdPort: 8125,
// System probe collection configuration
EnableSystemProbe: false,
DisableTCPTracing: false,
DisableUDPTracing: false,
DisableIPv6Tracing: false,
DisableDNSInspection: false,
EnableHTTPMonitoring: false,
SystemProbeAddress: defaultSystemProbeAddress,
SystemProbeLogFile: defaultSystemProbeLogFilePath,
SystemProbeBPFDir: defaultSystemProbeBPFDir,
MaxTrackedConnections: defaultMaxTrackedConnections,
EnableConntrack: true,
ClosedChannelSize: 500,
ConntrackMaxStateSize: defaultMaxTrackedConnections * 2,
ConntrackRateLimit: 500,
IgnoreConntrackInitFailure: false,
EnableConntrackAllNamespaces: true,
OffsetGuessThreshold: 400,
EnableTracepoints: false,
CollectDNSStats: true,
CollectDNSDomains: false,
EnableRuntimeCompiler: false,
RuntimeCompilerOutputDir: defaultRuntimeCompilerOutputDir,
EnableGatewayLookup: false,
// Orchestrator config
Orchestrator: oconfig.NewDefaultOrchestratorConfig(),
// Check config
EnabledChecks: enabledChecks,
CheckIntervals: map[string]time.Duration{
ProcessCheckName: 10 * time.Second,
RTProcessCheckName: 2 * time.Second,
ContainerCheckName: 10 * time.Second,
RTContainerCheckName: 2 * time.Second,
ConnectionsCheckName: 30 * time.Second,
PodCheckName: 10 * time.Second,
},
// DataScrubber to hide command line sensitive words
Scrubber: NewDefaultDataScrubber(),
Blacklist: make([]*regexp.Regexp, 0),
// Windows process config
Windows: WindowsConfig{
ArgsRefreshInterval: 15, // with default 20s check interval we refresh every 5m
AddNewArgs: true,
EnableMonotonicCount: false,
DriverBufferSize: 1024,
},
grpcConnectionTimeout: defaultGRPCConnectionTimeout,
}
// Set default values for proc/sys paths if unset.
// Don't set these if /host is not mounted, so the agent uses the context within the container.
// Generally only applicable for container-only cases like Fargate.
if config.IsContainerized() && util.PathExists("/host") {
if v := os.Getenv("HOST_PROC"); v == "" {
os.Setenv("HOST_PROC", "/host/proc")
}
if v := os.Getenv("HOST_SYS"); v == "" {
os.Setenv("HOST_SYS", "/host/sys")
}
}
return ac
}
func loadConfigIfExists(path string) error {
if util.PathExists(path) {
config.Datadog.AddConfigPath(path)
if strings.HasSuffix(path, ".yaml") { // If they set a config file directly, let's try to honor that
config.Datadog.SetConfigFile(path)
}
if _, err := config.LoadWithoutSecret(); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
return nil
}
func mergeConfigIfExists(path string) error {
if util.PathExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
if err := config.Datadog.MergeConfig(file); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
return nil
}
// NewAgentConfig returns an AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewAgentConfig(loggerName config.LoggerName, yamlPath, netYamlPath string) (*AgentConfig, error) {
var err error
// Note: This only considers container sources that are already setup. It's possible that container sources may
// need a few minutes to be ready on newly provisioned hosts.
_, err = util.GetContainers()
canAccessContainers := err == nil
cfg := NewDefaultAgentConfig(canAccessContainers)
// For Agent 6 we will have a YAML config file to use.
if err := loadConfigIfExists(yamlPath); err != nil {
return nil, err
}
if err := cfg.LoadProcessYamlConfig(yamlPath); err != nil {
return nil, err
}
if err := cfg.Orchestrator.Load(); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration
if err := setupLogger(loggerName, cfg.LogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
// For system probe, there is an additional config file that is shared with the system-probe
mergeConfigIfExists(netYamlPath) //nolint:errcheck
if err = cfg.loadSysProbeYamlConfig(netYamlPath); err != nil {
return nil, err
}
// TODO: Once proxies have been moved to common config util, remove this
if cfg.proxy, err = proxyFromEnv(cfg.proxy); err != nil {
log.Errorf("error parsing environment proxy settings, not using a proxy: %s", err)
cfg.proxy = nil
}
// Python-style log level has WARNING vs WARN
if strings.ToLower(cfg.LogLevel) == "warning" {
cfg.LogLevel = "warn"
}
if cfg.HostName == "" {
// lookup hostname if there is no config override
if hostname, err := getHostname(cfg.DDAgentBin, cfg.grpcConnectionTimeout); err == nil {
cfg.HostName = hostname
} else {
log.Errorf("Cannot get hostname: %v", err)
}
}
cfg.ContainerHostType = getContainerHostType()
if cfg.proxy != nil {
cfg.Transport.Proxy = cfg.proxy
}
// sanity check. This element is used with the modulo operator (%), so it can't be zero.
// if it is, log the error, and assume the config was attempting to disable
if cfg.Windows.ArgsRefreshInterval == 0 {
log.Warnf("invalid configuration: windows_collect_skip_new_args was set to 0. Disabling argument collection")
cfg.Windows.ArgsRefreshInterval = -1
}
// activate the pod collection if enabled and we have the cluster name set
if cfg.Orchestrator.OrchestrationCollectionEnabled {
if cfg.Orchestrator.KubeClusterName != "" {
cfg.EnabledChecks = append(cfg.EnabledChecks, PodCheckName)
} else {
log.Warnf("Failed to auto-detect a Kubernetes cluster name. Pod collection will not start. To fix this, set it manually via the cluster_name config option")
}
}
return cfg, nil
}
// NewSystemProbeConfig returns a system-probe specific AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewSystemProbeConfig(loggerName config.LoggerName, yamlPath string) (*AgentConfig, error) {
cfg := NewDefaultAgentConfig(false) // We don't access the container APIs in the system-probe
// When the system-probe is enabled in a separate container, we need a way to also disable the system-probe
// packaged in the main agent container (without disabling network collection on the process-agent).
//
// If this environment flag is set, it makes sure the system-probe will not start.
if ok, _ := isAffirmative(os.Getenv("DD_SYSTEM_PROBE_EXTERNAL")); ok {
cfg.EnableSystemProbe = false
return cfg, nil
}
loadConfigIfExists(yamlPath) //nolint:errcheck
if err := cfg.loadSysProbeYamlConfig(yamlPath); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration, with the system probe log file + config options
if err := setupLogger(loggerName, cfg.SystemProbeLogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
return cfg, nil
}
// getContainerHostType uses the fargate library to detect container environment and returns the protobuf version of it
func getContainerHostType() model.ContainerHostType {
switch fargate.GetOrchestrator() {
case fargate.ECS:
return model.ContainerHostType_fargateECS
case fargate.EKS:
return model.ContainerHostType_fargateEKS
}
return model.ContainerHostType_notSpecified
}
func loadEnvVariables() {
// The following environment variables will be loaded in the order listed, meaning variables
// further down the list may override prior variables.
for _, variable := range []struct{ env, cfg string }{
{"DD_PROCESS_AGENT_CONTAINER_SOURCE", "process_config.container_source"},
{"DD_SCRUB_ARGS", "process_config.scrub_args"},
{"DD_STRIP_PROCESS_ARGS", "process_config.strip_proc_arguments"},
{"DD_PROCESS_AGENT_URL", "process_config.process_dd_url"},
{"DD_PROCESS_AGENT_PROFILING_ENABLED", "process_config.profiling.enabled"},
{"DD_PROCESS_AGENT_REMOTE_TAGGER", "process_config.remote_tagger"},
{"DD_ORCHESTRATOR_URL", "orchestrator_explorer.orchestrator_dd_url"},
{"DD_HOSTNAME", "hostname"},
{"DD_DOGSTATSD_PORT", "dogstatsd_port"},
{"DD_BIND_HOST", "bind_host"},
{"HTTPS_PROXY", "proxy.https"},
{"DD_PROXY_HTTPS", "proxy.https"},
{"DD_LOGS_STDOUT", "log_to_console"},
{"LOG_TO_CONSOLE", "log_to_console"},
{"DD_LOG_TO_CONSOLE", "log_to_console"},
{"LOG_LEVEL", "log_level"}, // Support LOG_LEVEL and DD_LOG_LEVEL but prefer DD_LOG_LEVEL
{"DD_LOG_LEVEL", "log_level"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
// Load the System Probe environment variables
loadSysProbeEnvVariables()
// Support API_KEY and DD_API_KEY but prefer DD_API_KEY.
apiKey, envKey := os.Getenv("DD_API_KEY"), "DD_API_KEY"
if apiKey == "" {
apiKey, envKey = os.Getenv("API_KEY"), "API_KEY"
}
if apiKey != "" { // We don't want to overwrite the API KEY provided as an environment variable
log.Infof("overriding API key from env %s value", envKey)
config.Datadog.Set("api_key", config.SanitizeAPIKey(strings.Split(apiKey, ",")[0]))
}
if v := os.Getenv("DD_CUSTOM_SENSITIVE_WORDS"); v != "" {
config.Datadog.Set("process_config.custom_sensitive_words", strings.Split(v, ","))
}
if v := os.Getenv("DD_PROCESS_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_PROCESS_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("process_config.additional_endpoints", endpoints)
}
}
if v := os.Getenv("DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("orchestrator_explorer.orchestrator_additional_endpoints", endpoints)
}
}
}
func loadSysProbeEnvVariables() {
for _, variable := range []struct{ env, cfg string }{
{"DD_SYSTEM_PROBE_ENABLED", "system_probe_config.enabled"},
{"DD_SYSTEM_PROBE_NETWORK_ENABLED", "network_config.enabled"},
{"DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING", "network_config.enable_http_monitoring"},
{"DD_SYSTEM_PROBE_CONNTRACK_IGNORE_ENOBUFS", "system_probe_config.conntrack_ignore_enobufs"},
{"DD_SYSTEM_PROBE_ENABLE_CONNTRACK_ALL_NAMESPACES", "system_probe_config.enable_conntrack_all_namespaces"},
{"DD_SYSTEM_PROBE_NETWORK_IGNORE_CONNTRACK_INIT_FAILURE", "network_config.ignore_conntrack_init_failure"},
{"DD_DISABLE_TCP_TRACING", "system_probe_config.disable_tcp"},
{"DD_DISABLE_UDP_TRACING", "system_probe_config.disable_udp"},
{"DD_DISABLE_IPV6_TRACING", "system_probe_config.disable_ipv6"},
{"DD_DISABLE_DNS_INSPECTION", "system_probe_config.disable_dns_inspection"},
{"DD_COLLECT_LOCAL_DNS", "system_probe_config.collect_local_dns"},
{"DD_COLLECT_DNS_STATS", "system_probe_config.collect_dns_stats"},
{"DD_SYSTEM_PROBE_PROFILING_ENABLED", "system_probe_config.profiling.enabled"},
{"DD_SITE", "system_probe_config.profiling.site"},
{"DD_APM_PROFILING_DD_URL", "system_probe_config.profiling.profile_dd_url"},
{"DD_API_KEY", "system_probe_config.profiling.api_key"},
{"DD_ENV", "system_probe_config.profiling.env"},
{"DD_COLLECT_DNS_DOMAINS", "system_probe_config.collect_dns_domains"},
{"DD_ENABLE_RUNTIME_COMPILER", "system_probe_config.enable_runtime_compiler"},
{"DD_KERNEL_HEADER_DIRS", "system_probe_config.kernel_header_dirs"},
{"DD_RUNTIME_COMPILER_OUTPUT_DIR", "system_probe_config.runtime_compiler_output_dir"},
{"DD_SYSTEM_PROBE_NETWORK_ENABLE_GATEWAY_LOOKUP", "network_config.enable_gateway_lookup"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
if v, ok := os.LookupEnv("DD_SYSPROBE_SOCKET"); ok {
if err := ValidateSysprobeSocket(v); err != nil {
log.Errorf("Could not parse DD_SYSPROBE_SOCKET: %s", err)
} else {
config.Datadog.Set(key(spNS, "sysprobe_socket"), v)
}
}
}
// IsBlacklisted returns a boolean indicating if the given command is blacklisted by our config.
func IsBlacklisted(cmdline []string, blacklist []*regexp.Regexp) bool {
cmd := strings.Join(cmdline, " ")
for _, b := range blacklist {
if b.MatchString(cmd) {
return true
}
}
return false
}
func isAffirmative(value string) (bool, error) {
if value == "" {
return false, fmt.Errorf("value is empty")
}
v := strings.ToLower(value)
return v == "true" || v == "yes" || v == "1", nil
}
// getHostname attempts to resolve the hostname in the following order: the main datadog agent via grpc, the main agent
// via cli and lastly falling back to os.Hostname() if it is unavailable
func getHostname(ddAgentBin string, grpcConnectionTimeout time.Duration) (string, error) {
// Fargate is handled as an exceptional case (there is no concept of a host, so we use the ARN in-place).
if fargate.IsFargateInstance() {
hostname, err := fargate.GetFargateHost()
if err == nil {
return hostname, nil
}
log.Errorf("failed to get Fargate host: %v", err)
}
// Get the hostname via gRPC from the main agent if a hostname has not been set either from config/fargate
hostname, err := getHostnameFromGRPC(ddgrpc.GetDDAgentClient, grpcConnectionTimeout)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from grpc: %v", err)
// If the hostname is not set then we fallback to use the agent binary
hostname, err = getHostnameFromCmd(ddAgentBin, exec.Command)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from cmd: %v", err)
return os.Hostname()
}
// getHostnameCmd shells out to obtain the hostname used by the infra agent
func getHostnameFromCmd(ddAgentBin string, cmdFn cmdFunc) (string, error) {
cmd := cmdFn(ddAgentBin, "hostname")
// Copying all environment variables to child process
// Windows: Required, so the child process can load DLLs, etc.
// Linux: Optional, but will make use of DD_HOSTNAME and DOCKER_DD_AGENT if they exist
cmd.Env = append(cmd.Env, os.Environ()...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", err
}
hostname := strings.TrimSpace(stdout.String())
if hostname == "" {
return "", fmt.Errorf("error retrieving dd-agent hostname %s", stderr.String())
}
return hostname, nil
}
// getHostnameFromGRPC retrieves the hostname from the main datadog agent via GRPC
func getHostnameFromGRPC(grpcClientFn func(ctx context.Context, opts ...grpc.DialOption) (pb.AgentClient, error), grpcConnectionTimeout time.Duration) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), grpcConnectionTimeout)
defer cancel()
ddAgentClient, err := grpcClientFn(ctx)
if err != nil {
return "", fmt.Errorf("cannot connect to datadog agent via grpc: %w", err)
}
reply, err := ddAgentClient.GetHostname(ctx, &pb.HostnameRequest{})
if err != nil {
return "", fmt.Errorf("cannot get hostname from datadog agent via grpc: %w", err)
}
log.Debugf("retrieved hostname:%s from datadog agent via grpc", reply.Hostname)
return reply.Hostname, nil
}
// proxyFromEnv parses out the proxy configuration from the ENV variables in a
// similar way to getProxySettings and, if enough values are available, returns
// a new proxy URL value. If the environment is not set for this then the
// `defaultVal` is returned.
func proxyFromEnv(defaultVal proxyFunc) (proxyFunc, error) {
var host string
scheme := "http"
if v := os.Getenv("PROXY_HOST"); v != "" {
// accept either http://myproxy.com or myproxy.com
if i := strings.Index(v, "://"); i != -1 {
// when available, parse the scheme from the url
scheme = v[0:i]
host = v[i+3:]
} else {
host = v
}
}
if host == "" {
return defaultVal, nil
}
port := defaultProxyPort
if v := os.Getenv("PROXY_PORT"); v != "" {
port, _ = strconv.Atoi(v)
}
var user, password string
if v := os.Getenv("PROXY_USER"); v != "" {
user = v
}
if v := os.Getenv("PROXY_PASSWORD"); v != "" {
password = v
}
return constructProxy(host, scheme, port, user, password)
}
// constructProxy constructs a *url.Url for a proxy given the parts of a
// Note that we assume we have at least a non-empty host for this call but
// all other values can be their defaults (empty string or 0).
func constructProxy(host, scheme string, port int, user, password string) (proxyFunc, error) {
var userpass *url.Userinfo
if user != "" {
if password != "" {
userpass = url.UserPassword(user, password)
} else {
userpass = url.User(user)
}
}
var path string
if userpass != nil {
path = fmt.Sprintf("%s@%s:%v", userpass.String(), host, port)
} else {
path = fmt.Sprintf("%s:%v", host, port)
}
if scheme != "" {
path = fmt.Sprintf("%s://%s", scheme, path)
}
u, err := url.Parse(path)
if err != nil {
return nil, err
}
return http.ProxyURL(u), nil
}
func setupLogger(loggerName config.LoggerName, logFile string, cfg *AgentConfig) error {
return config.SetupLogger(
loggerName,
cfg.LogLevel,
logFile,
config.GetSyslogURI(),
config.Datadog.GetBool("syslog_rfc"),
config.Datadog.GetBool("log_to_console"),
config.Datadog.GetBool("log_format_json"),
)
}
|
[
"\"HOST_PROC\"",
"\"HOST_SYS\"",
"\"DD_SYSTEM_PROBE_EXTERNAL\"",
"\"DD_API_KEY\"",
"\"API_KEY\"",
"\"DD_CUSTOM_SENSITIVE_WORDS\"",
"\"DD_PROCESS_ADDITIONAL_ENDPOINTS\"",
"\"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS\"",
"\"PROXY_HOST\"",
"\"PROXY_PORT\"",
"\"PROXY_USER\"",
"\"PROXY_PASSWORD\""
] |
[] |
[
"HOST_SYS",
"PROXY_PASSWORD",
"DD_SYSTEM_PROBE_EXTERNAL",
"PROXY_HOST",
"API_KEY",
"HOST_PROC",
"PROXY_USER",
"PROXY_PORT",
"DD_PROCESS_ADDITIONAL_ENDPOINTS",
"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS",
"DD_API_KEY",
"DD_CUSTOM_SENSITIVE_WORDS"
] |
[]
|
["HOST_SYS", "PROXY_PASSWORD", "DD_SYSTEM_PROBE_EXTERNAL", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"]
|
go
| 12 | 0 | |
load-balancer.go
|
package main
import (
info "github.com/google/cadvisor/info/v1"
"errors"
"fmt"
"log"
"net/http"
"strings"
"github.com/google/cadvisor/client"
"os"
"sync"
"time"
)
// monitor : URL where the cAdvisor container is
// filter : the name of the servers the clients want to contact
// port : port we will listen on
var filter, monitor, port string
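// mapContainers maps a container alias to its current memory usage; mutex guards it
// against concurrent access from the updater goroutine and the HTTP handler.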
var mapContainers = make(map[string]uint64)
var mutex sync.Mutex
// refreshes the container data on every tick
// this method runs in the background so it does not block the HTTP server
func updateContainers() {
listContainers()
for range time.Tick(3 * time.Second) {
log.Println("Starting list of containers update...")
listContainers()
log.Print("Done.\n\n")
}
}
// update the mapContainers variable with containers containing the filter variable in their name
func listContainers() {
allContainers, _ := getAllContainerInfo(monitor)
// resetting mapContainers
mutex.Lock()
for key := range mapContainers {
delete(mapContainers, key)
}
// filtering data and filling mapContainers
for _, item := range allContainers {
alias := item.Aliases[0]
kbFree := item.Stats[0].Memory.Usage
if strings.Contains(alias, filter) {
mapContainers[alias] = kbFree
}
}
// logging for the containers in mapContainers
log.Println("\tFound", len(mapContainers), "containers")
for key, value := range mapContainers {
log.Printf("\t\t%s %d", key, value)
}
mutex.Unlock()
}
// determine the container with the most available RAM (the lowest memory usage)
// throw an error when no servers are found
func getLessLoaded() (string, error) {
var lessLoaded string
mutex.Lock()
for key := range mapContainers {
if lessLoaded == "" {
lessLoaded = key
} else if mapContainers[key] < mapContainers[lessLoaded] {
lessLoaded = key
}
}
mutex.Unlock()
if lessLoaded == "" {
return "", errors.New("No server found...")
}
return lessLoaded, nil
}
// writes the least loaded server to the client
// the reply carries HTTP code 200 (ok) or 500 (server encountered an error)
func handleRoot(w http.ResponseWriter, r *http.Request) {
server, err := getLessLoaded()
if detectError(err, false) {
w.WriteHeader(500) //warn the user that the server encountered a problem
} else {
w.WriteHeader(200)
}
w.Write([]byte(server))
}
// query cAdvisor for all the running containers on the same host
// if cAdvisor returns an error, it is only logged to standard output
func getAllContainerInfo(cadvisor string) ([]info.ContainerInfo, error) {
client, err := client.NewClient(cadvisor)
if detectError(err, true) {
return nil, err
}
request := info.DefaultContainerInfoRequest()
allContainers, err := client.AllDockerContainers(&request)
if detectError(err, true) {
return nil, err
}
return allContainers, nil
}
// will help to display any error
func detectError(err error, doLog bool) bool {
if err != nil {
if doLog {
log.Println(err)
}
return true
}
return false
}
// clients can ask at / which server is the least loaded
func main() {
// getting all the variables needed to run
filter = os.Getenv("FILTER")
monitor = os.Getenv("MONITOR")
port = os.Getenv("HTTP_PORT")
// check if all variables are set
if filter == "" {
log.Fatalln("FILTER environment variable is missing")
}
if monitor == "" {
log.Fatalln("MONITOR environment variable is missing")
}
if port == "" {
log.Fatalln("HTTP_PORT environment variable is missing")
}
go updateContainers()
http.HandleFunc("/", handleRoot)
fmt.Println("Listening on http://127.0.0.1" + port)
log.Fatal(http.ListenAndServe(port, nil))
}
|
[
"\"FILTER\"",
"\"MONITOR\"",
"\"HTTP_PORT\""
] |
[] |
[
"HTTP_PORT",
"FILTER",
"MONITOR"
] |
[]
|
["HTTP_PORT", "FILTER", "MONITOR"]
|
go
| 3 | 0 | |
tests/test_special_settings.py
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from mock import MagicMock, call, patch
from simple_settings.special_settings import (
configure_logging,
override_settings_by_env,
process_special_settings,
required_not_none_settings,
required_settings,
required_settings_types
)
class TestSpecialSettings(object):
@pytest.fixture
def settings_dict_to_override(self):
return {
'SIMPLE_SETTINGS': {
'OVERRIDE_BY_ENV': True
},
'SIMPLE_STRING': 'simple',
'SIMPLE_INTEGER': 1
}
@pytest.fixture
def settings_dict_required(self):
return {
'SIMPLE_SETTINGS': {
'REQUIRED_SETTINGS': ('SIMPLE_STRING', 'LOST_SETTING')
},
'SIMPLE_STRING': None,
}
@pytest.fixture
def settings_dict_required_not_none(self):
return {
'SIMPLE_SETTINGS': {
'REQUIRED_NOT_NONE_SETTINGS': ('SIMPLE_STRING', )
},
'SIMPLE_STRING': None,
'SIMPLE_INTEGER': None
}
@pytest.fixture
def settings_dict_required_types_unsupported_type(self):
return {
'SIMPLE_SETTINGS': {
'REQUIRED_SETTINGS_TYPES': {
'SIMPLE_STRING': 'str',
'UNSUPPORTED_TYPE': 'foo'
}
}
}
@pytest.fixture
def settings_dict_required_types_invalid_types(self):
return {
'SIMPLE_SETTINGS': {
'REQUIRED_SETTINGS_TYPES': {
'SIMPLE_INTEGER': 'int',
'SIMPLE_BOOL': 'bool',
'SIMPLE_STR': 'json.loads',
}
},
'SIMPLE_INTEGER': 0.1, # not an int and not a str so cannot parse
'SIMPLE_BOOL': 'foo', # not a bool and not parseable to a bool
'SIMPLE_STR': 'foo', # not a valid value to perform json.loads
}
@pytest.fixture
def settings_dict_required_types_valid_types(self):
return {
'SIMPLE_SETTINGS': {
'REQUIRED_SETTINGS_TYPES': {
'STRING_NONE': 'str',
'STRING_NATIVE': 'str',
'INTEGER_NONE': 'int',
'INTEGER_NATIVE': 'int',
'INTEGER_PARSED': 'int',
'FLOAT_NONE': 'float',
'FLOAT_NATIVE': 'float',
'FLOAT_PARSED': 'float',
'BOOL_NONE': 'bool',
'BOOL_NATIVE': 'bool',
'BOOL_PARSED_1': 'bool',
'BOOL_PARSED_3': 'bool',
'BOOL_PARSED_4': 'bool',
'BOOL_PARSED_5': 'bool',
'JSON_LOADS_PARSED_1': 'json.loads',
'JSON_LOADS_PARSED_2': 'json.loads',
'JSON_LOADS_PARSED_3': 'json.loads',
'JSON_LOADS_PARSED_4': 'json.loads',
'JSON_LOADS_PARSED_5': 'json.loads',
'JSON_LOADS_PARSED_6': 'json.loads',
}
},
'STRING_NONE': None,
'STRING_NATIVE': 'simple',
'INTEGER_NONE': None,
'INTEGER_NATIVE': 2,
'INTEGER_PARSED': '3',
'FLOAT_NONE': None,
'FLOAT_NATIVE': 0.2,
'FLOAT_PARSED': '0.3',
'BOOL_NONE': None,
'BOOL_NATIVE': True,
'BOOL_PARSED_1': 'true',
'BOOL_PARSED_3': 'True',
'BOOL_PARSED_4': 'false',
'BOOL_PARSED_5': 'False',
'JSON_LOADS_PARSED_1': '{"simple": "value"}',
'JSON_LOADS_PARSED_2': 'true',
'JSON_LOADS_PARSED_3': '1',
'JSON_LOADS_PARSED_4': [1, 3],
'JSON_LOADS_PARSED_5': '[1, 3]',
'JSON_LOADS_PARSED_6': {"simple": "value"},
}
@pytest.fixture
def settings_dict_override_and_required(self):
return {
'SIMPLE_SETTINGS': {
'OVERRIDE_BY_ENV': True,
'REQUIRED_SETTINGS': ('SIMPLE_INTEGER', ),
'REQUIRED_NOT_NONE_SETTINGS': ('SIMPLE_INTEGER', ),
'REQUIRED_SETTINGS_TYPES': {
'SIMPLE_INTEGER': 'int'
}
},
'SIMPLE_INTEGER': None
}
@pytest.fixture
def settings_dict_logging(self):
return {
'SIMPLE_SETTINGS': {
'CONFIGURE_LOGGING': True
},
'LOGGING': {
'dummy': 'dict'
}
}
def test_should_autoconfig_python_logging(self, settings_dict_logging):
with patch('logging.config.dictConfig') as mock:
configure_logging(settings_dict_logging)
mock.assert_called_once_with(settings_dict_logging['LOGGING'])
def test_should_dont_autoconfig_python_logging_if_dont_have_special_key(
self, settings_dict_logging
):
settings_dict_logging['SIMPLE_SETTINGS']['CONFIGURE_LOGGING'] = False
with patch('logging.config.dictConfig') as mock:
configure_logging(settings_dict_logging)
assert not mock.called
def test_should_override_by_env(self, settings_dict_to_override):
def mock_env_side_effect(k, d=None):
return u'simple from env' if k == 'SIMPLE_STRING' else d
with patch('os.environ.get', side_effect=mock_env_side_effect):
override_settings_by_env(settings_dict_to_override)
assert settings_dict_to_override['SIMPLE_STRING'] == u'simple from env'
assert settings_dict_to_override['SIMPLE_INTEGER'] == 1
def test_should_dont_override_by_env_if_settings_dont_have_special_key(
self, settings_dict_to_override
):
def mock_env_side_effect(k, d=None):
return u'simple from env' if k == 'SIMPLE_STRING' else d
settings_dict_to_override['SIMPLE_SETTINGS']['OVERRIDE_BY_ENV'] = False
with patch('os.environ.get', side_effect=mock_env_side_effect):
override_settings_by_env(settings_dict_to_override)
assert settings_dict_to_override['SIMPLE_STRING'] == u'simple'
def test_required_settings_should_raise_value_error_for_a_lost_setting(
self, settings_dict_required
):
with pytest.raises(ValueError) as exc:
required_settings(settings_dict_required)
assert 'LOST_SETTING' in str(exc.value)
assert 'SIMPLE_STRING' not in str(exc.value)
def test_required_not_none_settings_should_raise_value_error_for_a_none_setting(
self, settings_dict_required_not_none
):
with pytest.raises(ValueError) as exc:
required_not_none_settings(settings_dict_required_not_none)
assert 'SIMPLE_STRING' in str(exc.value)
assert 'SIMPLE_INTEGER' not in str(exc.value)
def test_required_settings_types_should_raise_value_error_for_an_unsupported_type(
self, settings_dict_required_types_unsupported_type
):
with pytest.raises(ValueError) as exc:
required_settings_types(
settings_dict_required_types_unsupported_type
)
assert 'UNSUPPORTED_TYPE' in str(exc.value)
assert 'SIMPLE_INTEGER' not in str(exc.value)
def test_required_settings_types_should_raise_value_error_for_invalid_types(
self, settings_dict_required_types_invalid_types
):
with pytest.raises(ValueError) as exc:
required_settings_types(settings_dict_required_types_invalid_types)
assert 'SIMPLE_INTEGER' in str(exc.value)
assert 'SIMPLE_BOOL' in str(exc.value)
def test_required_settings_types_should_not_raise_value_error_for_valid_types(
self, settings_dict_required_types_valid_types
):
required_settings_types(settings_dict_required_types_valid_types)
def converted_value(key):
return settings_dict_required_types_valid_types[key]
assert converted_value('STRING_NONE') is None
assert converted_value('INTEGER_NONE') is None
assert converted_value('FLOAT_NONE') is None
assert converted_value('BOOL_NONE') is None
assert isinstance(converted_value('STRING_NATIVE'), str)
assert isinstance(converted_value('INTEGER_NATIVE'), int)
assert isinstance(converted_value('INTEGER_PARSED'), int)
assert isinstance(converted_value('FLOAT_NATIVE'), float)
assert isinstance(converted_value('FLOAT_PARSED'), float)
assert isinstance(converted_value('BOOL_NATIVE'), bool)
assert isinstance(converted_value('BOOL_PARSED_1'), bool)
assert isinstance(converted_value('BOOL_PARSED_3'), bool)
assert isinstance(converted_value('BOOL_PARSED_4'), bool)
assert isinstance(converted_value('BOOL_PARSED_5'), bool)
assert isinstance(converted_value('JSON_LOADS_PARSED_1'), dict)
assert isinstance(converted_value('JSON_LOADS_PARSED_2'), bool)
assert isinstance(converted_value('JSON_LOADS_PARSED_3'), int)
assert converted_value('JSON_LOADS_PARSED_1') == {'simple': 'value'}
assert converted_value('JSON_LOADS_PARSED_2') is True
assert converted_value('JSON_LOADS_PARSED_3') == 1
assert converted_value('JSON_LOADS_PARSED_4') == [1, 3]
assert converted_value('JSON_LOADS_PARSED_5') == [1, 3]
assert converted_value('JSON_LOADS_PARSED_6') == {'simple': 'value'}
def test_override_by_env_and_required_loads_in_correct_order(
self, settings_dict_override_and_required
):
def mock_env_side_effect(k, d=None):
return '1' if k == 'SIMPLE_INTEGER' else d
with patch('os.environ.get', side_effect=mock_env_side_effect):
process_special_settings(settings_dict_override_and_required)
assert settings_dict_override_and_required['SIMPLE_INTEGER'] == 1
def test_should_call_functions_in_process_special_settings(self):
funcs = MagicMock()
settings_dict = {
'SIMPLE_SETTINGS': {
'bar': 'bar_value',
'foo': 'foo_value'
}
}
with patch(
'simple_settings.special_settings.SPECIAL_SETTINGS_MAPPING',
OrderedDict((('foo', funcs.foo), ('bar', funcs.bar)))
):
process_special_settings(settings_dict)
assert funcs.mock_calls == [
call.foo(settings_dict),
call.bar(settings_dict)
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmdShell.go
|
package main
import (
"fmt"
"os"
"strings"
"github.com/Luzifer/awsenv/shellsupport"
log "github.com/Sirupsen/logrus"
"github.com/spf13/cobra"
_ "github.com/Luzifer/awsenv/shellsupport/bash"
_ "github.com/Luzifer/awsenv/shellsupport/fish"
)
func getCmdShell() *cobra.Command {
cmd := cobra.Command{
Use: "shell [environment]",
Short: "print the AWS credentials in a format for your shell to eval()",
Run: actionCmdShell,
}
cmd.Flags().StringVarP(&cfg.Shell.Shell, "shell", "s", os.Getenv("SHELL"), "name of the shell to export for")
cmd.Flags().BoolVarP(&cfg.Shell.Export, "export", "x", true, "Adds proper export options for your shell")
return &cmd
}
func actionCmdShell(cmd *cobra.Command, args []string) {
if len(cfg.Shell.Shell) == 0 {
log.Errorf("Could not determine your shell. Please provide --shell")
os.Exit(1)
}
s := strings.Split(cfg.Shell.Shell, "/")
shell := s[len(s)-1]
log.Debugf("Found shell '%s'", shell)
handler, err := shellsupport.GetShellHandler(shell)
if err != nil {
log.Errorf("Could not find a handler for '%s' shell", shell)
os.Exit(1)
}
if len(args) < 1 {
log.Errorf("Please specify the enviroment to load")
os.Exit(1)
}
if a, ok := awsCredentials.Credentials[args[0]]; ok {
fmt.Println(strings.Join(handler(a, cfg.Shell.Export), "\n"))
os.Exit(0)
}
log.Errorf("Could not find environment '%s'", args[0])
os.Exit(1)
}
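// Editorial note, not in the original source: the basename extraction above is
// equivalent to filepath.Base from the standard library for typical $SHELL
// values, e.g.
//
//	shell := filepath.Base(os.Getenv("SHELL")) // "/bin/bash" -> "bash"
//
// and the printed credentials are meant to be evaluated by the shell itself,
// e.g. eval "$(awsenv shell my-environment)" (environment name hypothetical).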
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
src/main/java/com/google/devtools/build/lib/bugreport/BugReport.java
|
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.bugreport;
import static com.google.common.base.Strings.isNullOrEmpty;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.devtools.build.lib.analysis.BlazeVersionInfo;
import com.google.devtools.build.lib.util.CustomExitCodePublisher;
import com.google.devtools.build.lib.util.ExitCode;
import com.google.devtools.build.lib.util.LoggingUtil;
import com.google.devtools.build.lib.util.io.OutErr;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/**
* Utility methods for handling crashes: we log the crash, optionally send a bug report, and then
* terminate the jvm.
*
* <p> Note, code in this class must be extremely robust. There's nothing worse than a crash-handler
* that itself crashes!
*/
public abstract class BugReport {
private static final Logger logger = Logger.getLogger(BugReport.class.getName());
static final BugReporter REPORTER_INSTANCE = new DefaultBugReporter();
private static BlazeVersionInfo versionInfo = BlazeVersionInfo.instance();
private static BlazeRuntimeInterface runtime = null;
@Nullable private static volatile Throwable unprocessedThrowableInTest = null;
private static final Object LOCK = new Object();
private static final boolean IN_TEST = System.getenv("TEST_TMPDIR") != null;
private static final boolean SHOULD_NOT_SEND_BUG_REPORT_BECAUSE_IN_TEST =
IN_TEST && System.getenv("ENABLE_BUG_REPORT_LOGGING_IN_TEST") == null;
private BugReport() {}
/**
* This is a narrow interface for {@link BugReport}'s usage of BlazeRuntime. It lives in this
* file, for the sake of avoiding a build-time cycle.
*/
public interface BlazeRuntimeInterface {
String getProductName();
/**
* Perform all possible clean-up before crashing (posting events etc.), as long as crashing isn't
* significantly delayed and no further crash is triggered.
*/
void cleanUpForCrash(ExitCode exitCode);
}
public static void setRuntime(BlazeRuntimeInterface newRuntime) {
Preconditions.checkNotNull(newRuntime);
Preconditions.checkState(runtime == null, "runtime already set: %s, %s", runtime, newRuntime);
runtime = newRuntime;
}
private static String getProductName() {
return runtime != null ? runtime.getProductName() : "<unknown>";
}
/**
* In tests, Runtime#halt is disabled. Thus, the main thread should call this method whenever it
* is about to block on thread completion that might hang because of a failed halt below.
*/
public static void maybePropagateUnprocessedThrowableIfInTest() {
if (IN_TEST) {
// Instead of the jvm having been halted, we might have a saved Throwable.
synchronized (LOCK) {
Throwable lastUnprocessedThrowableInTest = unprocessedThrowableInTest;
unprocessedThrowableInTest = null;
if (lastUnprocessedThrowableInTest != null) {
Throwables.throwIfUnchecked(lastUnprocessedThrowableInTest);
throw new RuntimeException(lastUnprocessedThrowableInTest);
}
}
}
}
/**
* Convenience method for {@linkplain #sendBugReport(Throwable, List, String...) sending a bug
* report} without additional arguments.
*/
public static void sendBugReport(Throwable exception) {
sendBugReport(exception, /*args=*/ ImmutableList.of());
}
/**
* Logs the unhandled exception with a special prefix signifying that this was a crash.
*
* @param exception the unhandled exception to display.
* @param args additional values to record in the message.
* @param values additional string values to clarify the exception.
*/
public static void sendBugReport(Throwable exception, List<String> args, String... values) {
if (SHOULD_NOT_SEND_BUG_REPORT_BECAUSE_IN_TEST) {
Throwables.throwIfUnchecked(exception);
throw new IllegalStateException(
"Bug reports in tests should crash: " + args + ", " + Arrays.toString(values), exception);
}
if (!versionInfo.isReleasedBlaze()) {
logger.info("(Not a released binary; not logged.)");
return;
}
logException(exception, filterArgs(args), values);
}
private static void logCrash(Throwable throwable, boolean sendBugReport, String... args) {
logger.severe("Crash: " + throwable + " " + Throwables.getStackTraceAsString(throwable));
if (sendBugReport) {
BugReport.sendBugReport(throwable, Arrays.asList(args));
}
BugReport.printBug(OutErr.SYSTEM_OUT_ERR, throwable, /* oomMessage = */ null);
System.err.println("ERROR: " + getProductName() + " crash in async thread:");
throwable.printStackTrace();
}
/**
* Print and log the throwable (without sending a bug report), and then cause the current Blaze
* command to fail with the specified exit code, and then cause the jvm to terminate.
*
* <p>Has no effect if another crash has already been handled by {@link BugReport}.
*/
public static void handleCrashWithoutSendingBugReport(
Throwable throwable, ExitCode exitCode, String... args) {
handleCrash(throwable, /*sendBugReport=*/ false, exitCode, args);
}
/**
* Print, log, send a bug report, and then cause the current Blaze command to fail with the
* specified exit code, and then cause the jvm to terminate.
*
* <p>Has no effect if another crash has already been handled by {@link BugReport}.
*/
public static void handleCrash(Throwable throwable, ExitCode exitCode, String... args) {
handleCrash(throwable, /*sendBugReport=*/ true, exitCode, args);
}
/**
* Print, log, and send a bug report, and then cause the current Blaze command to fail with an
* exit code inferred from the given {@link Throwable}, and then cause the jvm to terminate.
*
* <p>Has no effect if another crash has already been handled by {@link BugReport}.
*/
public static RuntimeException handleCrash(Throwable throwable, String... args) {
throw handleCrash(throwable, /*sendBugReport=*/ true, /*exitCode=*/ null, args);
}
private static RuntimeException handleCrash(
Throwable throwable, boolean sendBugReport, @Nullable ExitCode exitCode, String... args) {
ExitCode exitCodeToUse = exitCode == null ? getExitCodeForThrowable(throwable) : exitCode;
int numericExitCode = exitCodeToUse.getNumericExitCode();
try {
synchronized (LOCK) {
if (IN_TEST) {
unprocessedThrowableInTest = throwable;
}
// Don't try to send a bug report during a crash in a test, it will throw itself.
if (!IN_TEST || !sendBugReport) {
logCrash(throwable, sendBugReport, args);
}
try {
if (runtime != null) {
runtime.cleanUpForCrash(exitCodeToUse);
}
CustomExitCodePublisher.maybeWriteExitStatusFile(numericExitCode);
} finally {
// Avoid shutdown deadlock issues: If an application shutdown hook crashes, it will
// trigger our Blaze crash handler (this method). Calling System#exit() here would
// therefore induce a deadlock. This call would block on the shutdown sequence completing,
// but the shutdown sequence would in turn be blocked on this thread finishing. Instead,
// exit fast via halt().
Runtime.getRuntime().halt(numericExitCode);
}
}
} catch (Throwable t) {
System.err.println(
"ERROR: A crash occurred while "
+ getProductName()
+ " was trying to handle a crash! Please file a bug against "
+ getProductName()
+ " and include the information below.");
System.err.println("Original uncaught exception:");
throwable.printStackTrace(System.err);
System.err.println("Exception encountered during BugReport#handleCrash:");
t.printStackTrace(System.err);
Runtime.getRuntime().halt(numericExitCode);
}
throw new IllegalStateException("never get here", throwable);
}
/** Get exit code corresponding to throwable. */
public static ExitCode getExitCodeForThrowable(Throwable throwable) {
return (Throwables.getRootCause(throwable) instanceof OutOfMemoryError)
? ExitCode.OOM_ERROR
: ExitCode.BLAZE_INTERNAL_ERROR;
}
private static void printThrowableTo(OutErr outErr, Throwable e) {
PrintStream err = new PrintStream(outErr.getErrorStream());
e.printStackTrace(err);
err.flush();
logger.log(Level.SEVERE, getProductName() + " crashed", e);
}
/**
* Print user-helpful information about the bug/crash to the output.
*
* @param outErr where to write the output
* @param e the exception thrown
*/
public static void printBug(OutErr outErr, Throwable e, String oomMessage) {
if (e instanceof OutOfMemoryError) {
outErr.printErr(
e.getMessage()
+ "\n\nERROR: "
+ getProductName()
+ " ran out of memory and crashed."
+ (isNullOrEmpty(oomMessage) ? "" : (" " + oomMessage))
+ "\n");
} else {
printThrowableTo(outErr, e);
}
}
/**
* Filters {@code args} by removing superfluous items:
*
* <ul>
* <li>The client's environment variables may contain sensitive data, so we filter it out.
* <li>{@code --default_override} is spammy.
* </ul>
*/
private static List<String> filterArgs(Iterable<String> args) {
if (args == null) {
return null;
}
ImmutableList.Builder<String> filteredArgs = ImmutableList.builder();
for (String arg : args) {
if (arg != null
&& !arg.startsWith("--client_env=")
&& !arg.startsWith("--default_override=")) {
filteredArgs.add(arg);
}
}
return filteredArgs.build();
}
// Log the exception. Because this method is only called in a blaze release,
// this will result in a report being sent to a remote logging service.
private static void logException(Throwable exception, List<String> args, String... values) {
logger.severe("Exception: " + Throwables.getStackTraceAsString(exception));
// The preamble is used in the crash watcher, so don't change it
// unless you know what you're doing.
String preamble = getProductName()
+ (exception instanceof OutOfMemoryError ? " OOMError: " : " crashed with args: ");
LoggingUtil.logToRemote(Level.SEVERE, preamble + Joiner.on(' ').join(args), exception,
values);
}
private static class DefaultBugReporter implements BugReporter {
@Override
public void sendBugReport(Throwable exception) {
BugReport.sendBugReport(exception);
}
@Override
public void sendBugReport(Throwable exception, List<String> args, String... values) {
BugReport.sendBugReport(exception, args, values);
}
@Override
public RuntimeException handleCrash(Throwable throwable, String... args) {
throw BugReport.handleCrash(throwable, args);
}
}
}
|
[
"\"TEST_TMPDIR\"",
"\"ENABLE_BUG_REPORT_LOGGING_IN_TEST\""
] |
[] |
[
"TEST_TMPDIR",
"ENABLE_BUG_REPORT_LOGGING_IN_TEST"
] |
[]
|
["TEST_TMPDIR", "ENABLE_BUG_REPORT_LOGGING_IN_TEST"]
|
java
| 2 | 0 | |
proxy/proxy.go
|
package proxy
import (
stdlibcontext "context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptrace"
"net/http/httputil"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/dimfeld/httppath"
ot "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/zalando/skipper/circuit"
"github.com/zalando/skipper/eskip"
"github.com/zalando/skipper/filters"
al "github.com/zalando/skipper/filters/accesslog"
circuitfilters "github.com/zalando/skipper/filters/circuit"
flowidFilter "github.com/zalando/skipper/filters/flowid"
ratelimitfilters "github.com/zalando/skipper/filters/ratelimit"
tracingfilter "github.com/zalando/skipper/filters/tracing"
"github.com/zalando/skipper/loadbalancer"
"github.com/zalando/skipper/logging"
"github.com/zalando/skipper/metrics"
"github.com/zalando/skipper/proxy/fastcgi"
"github.com/zalando/skipper/ratelimit"
"github.com/zalando/skipper/rfc"
"github.com/zalando/skipper/routing"
"github.com/zalando/skipper/scheduler"
"github.com/zalando/skipper/tracing"
)
const (
proxyBufferSize = 8192
unknownRouteID = "_unknownroute_"
unknownRouteBackendType = "<unknown>"
unknownRouteBackend = "<unknown>"
backendIsProxyHeader = "X-Skipper-Proxy"
// Number of loops allowed by default.
DefaultMaxLoopbacks = 9
// The default value set for http.Transport.MaxIdleConnsPerHost.
DefaultIdleConnsPerHost = 64
// The default period at which the idle connections are forcibly
// closed.
DefaultCloseIdleConnsPeriod = 20 * time.Second
// DefaultResponseHeaderTimeout, the default response header timeout
DefaultResponseHeaderTimeout = 60 * time.Second
// DefaultExpectContinueTimeout, the default timeout to expect
// a response for a 100 Continue request
DefaultExpectContinueTimeout = 30 * time.Second
)
// Flags control the behavior of the proxy.
type Flags uint
const (
FlagsNone Flags = 0
// Insecure causes the proxy to ignore the verification of
// the TLS certificates of the backend services.
Insecure Flags = 1 << iota
// PreserveOriginal indicates that filters require the
// preserved original metadata of the request and the response.
PreserveOriginal
// PreserveHost indicates whether the outgoing request to the
// backend should use by default the 'Host' header of the incoming
// request, or the host part of the backend address, in case filters
// don't change it.
PreserveHost
// Debug indicates that the current proxy instance will be used as a
// debug proxy. Debug proxies don't forward the request to the
// route backends, but they execute all filters, and return a
// JSON document with the changes the filters make to the request
// and with the approximate changes they would make to the
// response.
Debug
// HopHeadersRemoval indicates whether the Hop Headers should be removed
// in compliance with RFC 2616
HopHeadersRemoval
// PatchPath instructs the proxy to patch the parsed request path
// if the reserved characters according to RFC 2616 and RFC 3986
// were unescaped by the parser.
PatchPath
)
// Options are deprecated alias for Flags.
type Options Flags
const (
OptionsNone = Options(FlagsNone)
OptionsInsecure = Options(Insecure)
OptionsPreserveOriginal = Options(PreserveOriginal)
OptionsPreserveHost = Options(PreserveHost)
OptionsDebug = Options(Debug)
OptionsHopHeadersRemoval = Options(HopHeadersRemoval)
)
type OpenTracingParams struct {
// Tracer holds the tracer enabled for this proxy instance
Tracer ot.Tracer
// InitialSpan can override the default initial, pre-routing, span name.
// Default: "ingress".
InitialSpan string
// LogFilterEvents enables the behavior to mark start and completion times of filters
// on the span representing request filters being processed.
// Default: false
LogFilterEvents bool
// LogStreamEvents enables the logs that mark the times when response headers & payload are streamed to
// the client
// Default: false
LogStreamEvents bool
// ExcludeTags controls what tags are disabled. Any tag that is listed here will be ignored.
ExcludeTags []string
}
// Proxy initialization options.
type Params struct {
// The proxy expects a routing instance that is used to match
// the incoming requests to routes.
Routing *routing.Routing
// Control flags. See the Flags values.
Flags Flags
// An optional list of priority routes to be used for matching
// before the general lookup tree.
PriorityRoutes []PriorityRoute
// Enable the experimental upgrade protocol feature
ExperimentalUpgrade bool
// ExperimentalUpgradeAudit enables audit log of both the request line
// and the response messages during web socket upgrades.
ExperimentalUpgradeAudit bool
// When set, no access log is printed.
AccessLogDisabled bool
// DualStack sets if the proxy TCP connections to the backend should be dual stack
DualStack bool
// DefaultHTTPStatus is the HTTP status used when no routes are found
// for a request.
DefaultHTTPStatus int
// MaxLoopbacks sets the maximum number of allowed loops. If 0
// the default (9) is applied. To disable looping, set it to
// -1. Note that disabling looping with this option may result in
// wrong routing depending on the current configuration.
MaxLoopbacks int
// Same as net/http.Transport.MaxIdleConnsPerHost, but the default
// is 64. This value supports scenarios with relatively few remote
// hosts. When the routing table contains different hosts in the
// range of hundreds, it is recommended to set this options to a
// lower value.
IdleConnectionsPerHost int
// MaxIdleConns limits the number of idle connections to all backends, 0 means no limit
MaxIdleConns int
// DisableHTTPKeepalives forces backend to always create a new connection
DisableHTTPKeepalives bool
// CircuitBreakers provides a registry that skipper can use to
// find the matching circuit breaker for backend requests. If not
// set, no circuit breakers are used.
CircuitBreakers *circuit.Registry
// RateLimiters provides a registry that skipper can use to
// find the matching ratelimiter for backend requests. If not
// set, no ratelimits are used.
RateLimiters *ratelimit.Registry
// LoadBalancer to report unhealthy or dead backends to
LoadBalancer *loadbalancer.LB
// CloseIdleConnsPeriod defines how often the idle connections are
// forcibly closed. The default is 20 seconds (DefaultCloseIdleConnsPeriod). When set to less
// than 0, the proxy doesn't force closing the idle connections.
CloseIdleConnsPeriod time.Duration
// The Flush interval for copying upgraded connections
FlushInterval time.Duration
// Timeout sets the TCP client connection timeout for proxy http connections to the backend
Timeout time.Duration
// ResponseHeaderTimeout sets the HTTP response timeout for
// proxy http connections to the backend.
ResponseHeaderTimeout time.Duration
// ExpectContinueTimeout sets the HTTP timeout to expect a
// response for status Code 100 for proxy http connections to
// the backend.
ExpectContinueTimeout time.Duration
// KeepAlive sets the TCP keepalive for proxy http connections to the backend
KeepAlive time.Duration
// TLSHandshakeTimeout sets the TLS handshake timeout for proxy connections to the backend
TLSHandshakeTimeout time.Duration
// Client TLS to connect to Backends
ClientTLS *tls.Config
// OpenTracing contains parameters related to OpenTracing instrumentation. For default values
// check OpenTracingParams
OpenTracing *OpenTracingParams
// CustomHttpRoundTripperWrap provides the ability to wrap the http.RoundTripper created by skipper.
// http.RoundTripper is used for making outgoing requests (backends)
// It allows adding additional logic (for example tracing) by providing a wrapper function
// which accepts the original skipper http.RoundTripper as an argument and returns a wrapped round tripper
CustomHttpRoundTripperWrap func(http.RoundTripper) http.RoundTripper
}
type (
maxLoopbackError string
ratelimitError string
routeLookupError string
)
func (e maxLoopbackError) Error() string { return string(e) }
func (e ratelimitError) Error() string { return string(e) }
func (e routeLookupError) Error() string { return string(e) }
const (
errMaxLoopbacksReached = maxLoopbackError("max loopbacks reached")
errRatelimit = ratelimitError("ratelimited")
errRouteLookup = routeLookupError("route lookup failed")
)
var (
errRouteLookupFailed = &proxyError{err: errRouteLookup}
errCircuitBreakerOpen = &proxyError{
err: errors.New("circuit breaker open"),
code: http.StatusServiceUnavailable,
additionalHeader: http.Header{"X-Circuit-Open": []string{"true"}},
}
disabledAccessLog = al.AccessLogFilter{Enable: false, Prefixes: nil}
enabledAccessLog = al.AccessLogFilter{Enable: true, Prefixes: nil}
hopHeaders = map[string]bool{
"Te": true,
"Connection": true,
"Proxy-Connection": true,
"Keep-Alive": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
"Trailer": true,
"Transfer-Encoding": true,
"Upgrade": true,
}
)
// When set, the proxy will skip the TLS verification on outgoing requests.
func (f Flags) Insecure() bool { return f&Insecure != 0 }
// When set, the filters will receive an unmodified clone of the original
// incoming request and response.
func (f Flags) PreserveOriginal() bool { return f&(PreserveOriginal|Debug) != 0 }
// When set, the proxy will, by default, set the Host header value
// of the outgoing requests to that of the incoming request.
func (f Flags) PreserveHost() bool { return f&PreserveHost != 0 }
// When set, the proxy runs in debug mode.
func (f Flags) Debug() bool { return f&Debug != 0 }
// When set, the proxy will remove the Hop Headers
func (f Flags) HopHeadersRemoval() bool { return f&HopHeadersRemoval != 0 }
func (f Flags) patchPath() bool { return f&PatchPath != 0 }
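// Editorial sketch, not in the original source: Flags is a plain bitmask, so
// values are combined with bitwise OR and inspected through the accessors
// above, e.g.
//
//	f := Insecure | PreserveHost | HopHeadersRemoval
//	f.Insecure()         // true
//	f.PreserveHost()     // true
//	f.Debug()            // false
//	f.PreserveOriginal() // false: set only by PreserveOriginal or Debug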
// Priority routes are custom route implementations that are matched against
// each request before the routes in the general lookup tree.
type PriorityRoute interface {
// If the request is matched, returns a route, otherwise nil.
// Additionally it may return a parameter map used by the filters
// in the route.
Match(*http.Request) (*routing.Route, map[string]string)
}
// Proxy instances implement Skipper proxying functionality. For
// initialization, see the WithParams constructor and Params.
type Proxy struct {
experimentalUpgrade bool
experimentalUpgradeAudit bool
accessLogDisabled bool
maxLoops int
defaultHTTPStatus int
routing *routing.Routing
roundTripper http.RoundTripper
priorityRoutes []PriorityRoute
flags Flags
metrics metrics.Metrics
quit chan struct{}
flushInterval time.Duration
breakers *circuit.Registry
limiters *ratelimit.Registry
log logging.Logger
tracing *proxyTracing
lb *loadbalancer.LB
upgradeAuditLogOut io.Writer
upgradeAuditLogErr io.Writer
auditLogHook chan struct{}
clientTLS *tls.Config
hostname string
}
// proxyError is used to wrap errors during proxying and to indicate
// the required status code for the response sent from the main
// ServeHTTP method. Alternatively, it can indicate that the request
// was already handled, e.g. in case of deprecated shunting or the
// upgrade request.
type proxyError struct {
err error
code int
handled bool
dialingFailed bool
additionalHeader http.Header
}
func (e proxyError) Error() string {
if e.err != nil {
return fmt.Sprintf("dialing failed %v: %v", e.DialError(), e.err.Error())
}
if e.handled {
return "request handled in a non-standard way"
}
code := e.code
if code == 0 {
code = http.StatusInternalServerError
}
return fmt.Sprintf("proxy error: %d", code)
}
// DialError returns true if the error was caused while dialing TCP or
// TLS connections, before HTTP data was sent. It is safe to retry
// a call if this returns true.
func (e *proxyError) DialError() bool {
return e.dialingFailed
}
func (e *proxyError) NetError() net.Error {
if perr, ok := e.err.(net.Error); ok {
return perr
}
return nil
}
func copyHeader(to, from http.Header) {
for k, v := range from {
to[http.CanonicalHeaderKey(k)] = v
}
}
func copyHeaderExcluding(to, from http.Header, excludeHeaders map[string]bool) {
for k, v := range from {
// The http package converts header names to their canonical version.
// Meaning that the lookup below will be done using the canonical version of the header.
if _, ok := excludeHeaders[k]; !ok {
to[http.CanonicalHeaderKey(k)] = v
}
}
}
func cloneHeader(h http.Header) http.Header {
hh := make(http.Header)
copyHeader(hh, h)
return hh
}
func cloneHeaderExcluding(h http.Header, excludeList map[string]bool) http.Header {
hh := make(http.Header)
copyHeaderExcluding(hh, h, excludeList)
return hh
}
// copies a stream with flushing on every successful read operation
// (similar to io.Copy but with flushing)
func copyStream(to flushedResponseWriter, from io.Reader, tracing *proxyTracing, span ot.Span) error {
b := make([]byte, proxyBufferSize)
for {
l, rerr := from.Read(b)
tracing.logStreamEvent(span, StreamBodyEvent, fmt.Sprintf("%d", l))
if rerr != nil && rerr != io.EOF {
return rerr
}
if l > 0 {
_, werr := to.Write(b[:l])
if werr != nil {
return werr
}
to.Flush()
}
if rerr == io.EOF {
return nil
}
}
}
func schemeFromRequest(r *http.Request) string {
if r.TLS != nil {
return "https"
}
return "http"
}
func setRequestURLFromRequest(u *url.URL, r *http.Request) {
if u.Host == "" {
u.Host = r.Host
}
if u.Scheme == "" {
u.Scheme = schemeFromRequest(r)
}
}
func setRequestURLForDynamicBackend(u *url.URL, stateBag map[string]interface{}) {
dbu, ok := stateBag[filters.DynamicBackendURLKey].(string)
if ok && dbu != "" {
bu, err := url.ParseRequestURI(dbu)
if err == nil {
u.Host = bu.Host
u.Scheme = bu.Scheme
}
} else {
host, ok := stateBag[filters.DynamicBackendHostKey].(string)
if ok && host != "" {
u.Host = host
}
scheme, ok := stateBag[filters.DynamicBackendSchemeKey].(string)
if ok && scheme != "" {
u.Scheme = scheme
}
}
}
func setRequestURLForLoadBalancedBackend(u *url.URL, rt *routing.Route, lbctx *routing.LBContext) {
e := rt.LBAlgorithm.Apply(lbctx)
u.Scheme = e.Scheme
u.Host = e.Host
}
// creates an outgoing http request to be forwarded to the route endpoint
// based on the augmented incoming request
func mapRequest(r *http.Request, rt *routing.Route, host string, removeHopHeaders bool, stateBag map[string]interface{}) (*http.Request, error) {
u := r.URL
switch rt.BackendType {
case eskip.DynamicBackend:
setRequestURLFromRequest(u, r)
setRequestURLForDynamicBackend(u, stateBag)
case eskip.LBBackend:
setRequestURLForLoadBalancedBackend(u, rt, routing.NewLBContext(r, rt))
default:
u.Scheme = rt.Scheme
u.Host = rt.Host
}
body := r.Body
if r.ContentLength == 0 {
body = nil
}
rr, err := http.NewRequest(r.Method, u.String(), body)
if err != nil {
return nil, err
}
rr = rr.WithContext(r.Context())
rr.ContentLength = r.ContentLength
if removeHopHeaders {
rr.Header = cloneHeaderExcluding(r.Header, hopHeaders)
} else {
rr.Header = cloneHeader(r.Header)
}
rr.Host = host
// If there is basic auth configured in the URL we add it as an Authorization header
if u.User != nil {
up := u.User.String()
upBase64 := base64.StdEncoding.EncodeToString([]byte(up))
rr.Header.Add("Authorization", fmt.Sprintf("Basic %s", upBase64))
}
if _, ok := stateBag[filters.BackendIsProxyKey]; ok {
forwardToProxy(r, rr)
}
ctxspan := ot.SpanFromContext(r.Context())
if ctxspan != nil {
rr = rr.WithContext(ot.ContextWithSpan(rr.Context(), ctxspan))
}
return rr, nil
}
func forwardToProxy(incoming, outgoing *http.Request) {
proxyURL := &url.URL{
Scheme: outgoing.URL.Scheme,
Host: outgoing.URL.Host,
}
outgoing.URL.Host = incoming.Host
outgoing.URL.Scheme = schemeFromRequest(incoming)
outgoing.Header.Set(backendIsProxyHeader, proxyURL.String())
}
type skipperDialer struct {
net.Dialer
f func(ctx stdlibcontext.Context, network, addr string) (net.Conn, error)
}
func newSkipperDialer(d net.Dialer) *skipperDialer {
return &skipperDialer{
Dialer: d,
f: d.DialContext,
}
}
// DialContext wraps net.Dialer's DialContext and returns an error
// that can be checked to tell whether it was a transport (TCP/TLS handshake) error
// or timeout, as opposed to a timeout from http, which is in general
// not possible to retry.
func (dc *skipperDialer) DialContext(ctx stdlibcontext.Context, network, addr string) (net.Conn, error) {
span := ot.SpanFromContext(ctx)
if span != nil {
span.LogKV("dial_context", "start")
}
con, err := dc.f(ctx, network, addr)
if span != nil {
span.LogKV("dial_context", "done")
}
if err != nil {
return nil, &proxyError{
err: err,
code: -1, // omit 0 handling in proxy.Error()
dialingFailed: true, // indicate error happened before http
}
} else if cerr := ctx.Err(); cerr != nil {
// unclear when this is being triggered
return nil, &proxyError{
err: fmt.Errorf("err from dial context: %v", cerr),
code: http.StatusGatewayTimeout,
}
}
return con, nil
}
// New returns an initialized Proxy.
// Deprecated, see WithParams and Params instead.
func New(r *routing.Routing, options Options, pr ...PriorityRoute) *Proxy {
return WithParams(Params{
Routing: r,
Flags: Flags(options),
PriorityRoutes: pr,
CloseIdleConnsPeriod: -time.Second,
})
}
// WithParams returns an initialized Proxy.
func WithParams(p Params) *Proxy {
if p.IdleConnectionsPerHost <= 0 {
p.IdleConnectionsPerHost = DefaultIdleConnsPerHost
}
if p.CloseIdleConnsPeriod == 0 {
p.CloseIdleConnsPeriod = DefaultCloseIdleConnsPeriod
}
if p.ResponseHeaderTimeout == 0 {
p.ResponseHeaderTimeout = DefaultResponseHeaderTimeout
}
if p.ExpectContinueTimeout == 0 {
p.ExpectContinueTimeout = DefaultExpectContinueTimeout
}
if p.CustomHttpRoundTripperWrap == nil {
// default wrapper which does nothing
p.CustomHttpRoundTripperWrap = func(original http.RoundTripper) http.RoundTripper {
return original
}
}
tr := &http.Transport{
DialContext: newSkipperDialer(net.Dialer{
Timeout: p.Timeout,
KeepAlive: p.KeepAlive,
DualStack: p.DualStack,
}).DialContext,
TLSHandshakeTimeout: p.TLSHandshakeTimeout,
ResponseHeaderTimeout: p.ResponseHeaderTimeout,
ExpectContinueTimeout: p.ExpectContinueTimeout,
MaxIdleConns: p.MaxIdleConns,
MaxIdleConnsPerHost: p.IdleConnectionsPerHost,
IdleConnTimeout: p.CloseIdleConnsPeriod,
DisableKeepAlives: p.DisableHTTPKeepalives,
Proxy: proxyFromHeader,
}
quit := make(chan struct{})
// We need this to reliably fade on DNS change, which is right
// now not fixed with IdleConnTimeout in the http.Transport.
// https://github.com/golang/go/issues/23427
if p.CloseIdleConnsPeriod > 0 {
go func() {
for {
select {
case <-time.After(p.CloseIdleConnsPeriod):
tr.CloseIdleConnections()
case <-quit:
return
}
}
}()
}
if p.ClientTLS != nil {
tr.TLSClientConfig = p.ClientTLS
}
if p.Flags.Insecure() {
if tr.TLSClientConfig == nil {
/* #nosec */
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
} else {
/* #nosec */
tr.TLSClientConfig.InsecureSkipVerify = true
}
}
m := metrics.Default
if p.Flags.Debug() {
m = metrics.Void
}
if p.MaxLoopbacks == 0 {
p.MaxLoopbacks = DefaultMaxLoopbacks
} else if p.MaxLoopbacks < 0 {
p.MaxLoopbacks = 0
}
defaultHTTPStatus := http.StatusNotFound
if p.DefaultHTTPStatus >= http.StatusContinue && p.DefaultHTTPStatus <= http.StatusNetworkAuthenticationRequired {
defaultHTTPStatus = p.DefaultHTTPStatus
}
hostname := os.Getenv("HOSTNAME")
return &Proxy{
routing: p.Routing,
roundTripper: p.CustomHttpRoundTripperWrap(tr),
priorityRoutes: p.PriorityRoutes,
flags: p.Flags,
metrics: m,
quit: quit,
flushInterval: p.FlushInterval,
experimentalUpgrade: p.ExperimentalUpgrade,
experimentalUpgradeAudit: p.ExperimentalUpgradeAudit,
maxLoops: p.MaxLoopbacks,
breakers: p.CircuitBreakers,
lb: p.LoadBalancer,
limiters: p.RateLimiters,
log: &logging.DefaultLog{},
defaultHTTPStatus: defaultHTTPStatus,
tracing: newProxyTracing(p.OpenTracing),
accessLogDisabled: p.AccessLogDisabled,
upgradeAuditLogOut: os.Stdout,
upgradeAuditLogErr: os.Stderr,
clientTLS: tr.TLSClientConfig,
hostname: hostname,
}
}
var caughtPanic = false
// tryCatch executes function `p`, calling `onErr` if `p` panics.
// onErr will receive a stack trace string of the first panic;
// further panics are ignored for efficiency reasons
func tryCatch(p func(), onErr func(err interface{}, stack string)) {
defer func() {
if err := recover(); err != nil {
s := ""
if !caughtPanic {
buf := make([]byte, 1024)
l := runtime.Stack(buf, false)
s = string(buf[:l])
caughtPanic = true
}
onErr(err, s)
}
}()
p()
}
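// Editorial sketch, not in the original source: tryCatch turns a panic inside
// p into a call to onErr, so a misbehaving filter cannot take down the proxy:
//
//	tryCatch(func() {
//		panic("boom")
//	}, func(err interface{}, stack string) {
//		// err is "boom"; stack carries a trace only for the first panic
//		// observed process-wide (see caughtPanic above), later ones get "".
//	})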
func proxyFromHeader(req *http.Request) (*url.URL, error) {
if u := req.Header.Get(backendIsProxyHeader); u != "" {
req.Header.Del(backendIsProxyHeader)
return url.Parse(u)
}
return nil, nil
}
// applies filters to a request
func (p *Proxy) applyFiltersToRequest(f []*routing.RouteFilter, ctx *context) []*routing.RouteFilter {
if len(f) == 0 {
return f
}
filtersStart := time.Now()
filtersSpan := tracing.CreateSpan("request_filters", ctx.request.Context(), p.tracing.tracer)
defer filtersSpan.Finish()
ctx.parentSpan = filtersSpan
var filters = make([]*routing.RouteFilter, 0, len(f))
for _, fi := range f {
start := time.Now()
p.tracing.logFilterStart(filtersSpan, fi.Name)
tryCatch(func() {
ctx.setMetricsPrefix(fi.Name)
fi.Request(ctx)
p.metrics.MeasureFilterRequest(fi.Name, start)
}, func(err interface{}, stack string) {
if p.flags.Debug() {
// these errors are collected for the debug mode to be able
// to report in the response which filters failed.
ctx.debugFilterPanics = append(ctx.debugFilterPanics, err)
return
}
p.log.Errorf("error while processing filter during request: %s: %v (%s)", fi.Name, err, stack)
})
p.tracing.logFilterEnd(filtersSpan, fi.Name)
filters = append(filters, fi)
if ctx.deprecatedShunted() || ctx.shunted() {
break
}
}
p.metrics.MeasureAllFiltersRequest(ctx.route.Id, filtersStart)
return filters
}
// applies filters to a response in reverse order
func (p *Proxy) applyFiltersToResponse(filters []*routing.RouteFilter, ctx *context) {
filtersStart := time.Now()
filtersSpan := tracing.CreateSpan("response_filters", ctx.request.Context(), p.tracing.tracer)
defer filtersSpan.Finish()
ctx.parentSpan = filtersSpan
last := len(filters) - 1
for i := range filters {
fi := filters[last-i]
start := time.Now()
p.tracing.logFilterStart(filtersSpan, fi.Name)
tryCatch(func() {
ctx.setMetricsPrefix(fi.Name)
fi.Response(ctx)
p.metrics.MeasureFilterResponse(fi.Name, start)
}, func(err interface{}, stack string) {
if p.flags.Debug() {
// these errors are collected for the debug mode to be able
// to report in the response which filters failed.
ctx.debugFilterPanics = append(ctx.debugFilterPanics, err)
return
}
p.log.Errorf("error while processing filters during response: %s: %v (%s)", fi.Name, err, stack)
})
p.tracing.logFilterEnd(filtersSpan, fi.Name)
}
p.metrics.MeasureAllFiltersResponse(ctx.route.Id, filtersStart)
}
// addBranding sets the `Server` header on headerMap if it is not already present
func addBranding(headerMap http.Header) {
if headerMap.Get("Server") == "" {
headerMap.Set("Server", "Skipper")
}
}
func (p *Proxy) lookupRoute(ctx *context) (rt *routing.Route, params map[string]string) {
for _, prt := range p.priorityRoutes {
rt, params = prt.Match(ctx.request)
if rt != nil {
return rt, params
}
}
return ctx.routeLookup.Do(ctx.request)
}
// send a premature error response
func (p *Proxy) sendError(c *context, id string, code int) {
addBranding(c.responseWriter.Header())
text := http.StatusText(code) + "\n"
c.responseWriter.Header().Set("Content-Length", strconv.Itoa(len(text)))
c.responseWriter.Header().Set("Content-Type", "text/plain; charset=utf-8")
c.responseWriter.Header().Set("X-Content-Type-Options", "nosniff")
c.responseWriter.WriteHeader(code)
c.responseWriter.Write([]byte(text))
p.metrics.MeasureServe(
id,
c.metricsHost(),
c.request.Method,
code,
c.startServe,
)
}
func (p *Proxy) makeUpgradeRequest(ctx *context, req *http.Request) error {
backendURL := req.URL
reverseProxy := httputil.NewSingleHostReverseProxy(backendURL)
reverseProxy.FlushInterval = p.flushInterval
upgradeProxy := upgradeProxy{
backendAddr: backendURL,
reverseProxy: reverseProxy,
insecure: p.flags.Insecure(),
tlsClientConfig: p.clientTLS,
useAuditLog: p.experimentalUpgradeAudit,
auditLogOut: p.upgradeAuditLogOut,
auditLogErr: p.upgradeAuditLogErr,
auditLogHook: p.auditLogHook,
}
upgradeProxy.serveHTTP(ctx.responseWriter, req)
ctx.successfulUpgrade = true
p.log.Debugf("finished upgraded protocol %s session", getUpgradeRequest(ctx.request))
return nil
}
func (p *Proxy) makeBackendRequest(ctx *context) (*http.Response, *proxyError) {
var err error
req, err := mapRequest(ctx.request, ctx.route, ctx.outgoingHost, p.flags.HopHeadersRemoval(), ctx.StateBag())
if err != nil {
p.log.Errorf("could not map backend request, caused by: %v", err)
return nil, &proxyError{err: err}
}
if p.experimentalUpgrade && isUpgradeRequest(req) {
if err = p.makeUpgradeRequest(ctx, req); err != nil {
return nil, &proxyError{err: err}
}
// We are not owner of the connection anymore.
return nil, &proxyError{handled: true}
}
bag := ctx.StateBag()
spanName, ok := bag[tracingfilter.OpenTracingProxySpanKey].(string)
if !ok {
spanName = "proxy"
}
ctx.proxySpan = tracing.CreateSpan(spanName, req.Context(), p.tracing.tracer)
p.tracing.
setTag(ctx.proxySpan, SpanKindTag, SpanKindClient).
setTag(ctx.proxySpan, SkipperRouteIDTag, ctx.route.Id).
setTag(ctx.proxySpan, SkipperRouteTag, ctx.route.String())
u := cloneURL(req.URL)
u.RawQuery = ""
p.setCommonSpanInfo(u, req, ctx.proxySpan)
carrier := ot.HTTPHeadersCarrier(req.Header)
_ = p.tracing.tracer.Inject(ctx.proxySpan.Context(), ot.HTTPHeaders, carrier)
req = req.WithContext(ot.ContextWithSpan(req.Context(), ctx.proxySpan))
p.metrics.IncCounter("outgoing." + req.Proto)
ctx.proxySpan.LogKV("http_roundtrip", StartEvent)
req = injectClientTrace(req, ctx.proxySpan)
var response *http.Response
switch req.URL.Scheme {
case "fastcgi":
f := "index.php"
if sf, ok := ctx.StateBag()["fastCgiFilename"]; ok {
f = sf.(string)
}
rt, err := fastcgi.NewRoundTripper(p.log, req.URL.Host, f)
if err != nil {
p.log.Errorf("Failed to create fastcgi roundtripper: %v", err)
return nil, &proxyError{err: err}
}
// FastCgi expects the Host to be in form host:port
// It will then be split and added as 2 separate params to the backend process
if _, _, err := net.SplitHostPort(req.Host); err != nil {
req.Host = req.Host + ":" + req.URL.Port()
}
// RemoteAddr is needed to pass to the backend process as param
req.RemoteAddr = ctx.request.RemoteAddr
response, err = rt.RoundTrip(req)
if err != nil {
p.log.Errorf("Failed to roundtrip to fastcgi: %v", err)
return nil, &proxyError{err: err}
}
default:
response, err = p.roundTripper.RoundTrip(req)
}
ctx.proxySpan.LogKV("http_roundtrip", EndEvent)
if err != nil {
p.tracing.setTag(ctx.proxySpan, ErrorTag, true)
ctx.proxySpan.LogKV(
"event", "error",
"message", err.Error())
if perr, ok := err.(*proxyError); ok {
p.log.Errorf("Failed to do backend roundtrip to %s: %v", ctx.route.Backend, perr)
//p.lb.AddHealthcheck(ctx.route.Backend)
return nil, perr
} else if nerr, ok := err.(net.Error); ok {
p.log.Errorf("net.Error during backend roundtrip to %s: timeout=%v temporary=%v: %v", ctx.route.Backend, nerr.Timeout(), nerr.Temporary(), err)
//p.lb.AddHealthcheck(ctx.route.Backend)
if nerr.Timeout() {
p.tracing.setTag(ctx.proxySpan, HTTPStatusCodeTag, uint16(http.StatusGatewayTimeout))
return nil, &proxyError{
err: err,
code: http.StatusGatewayTimeout,
}
} else if !nerr.Temporary() {
p.tracing.setTag(ctx.proxySpan, HTTPStatusCodeTag, uint16(http.StatusServiceUnavailable))
return nil, &proxyError{
err: err,
code: http.StatusServiceUnavailable,
}
} else if !nerr.Timeout() && nerr.Temporary() {
p.log.Errorf("Backend error see https://github.com/zalando/skipper/issues/768: %v", err)
p.tracing.setTag(ctx.proxySpan, HTTPStatusCodeTag, uint16(http.StatusServiceUnavailable))
return nil, &proxyError{
err: err,
code: http.StatusServiceUnavailable,
}
} else {
p.tracing.setTag(ctx.proxySpan, HTTPStatusCodeTag, uint16(http.StatusInternalServerError))
return nil, &proxyError{
err: err,
code: http.StatusInternalServerError,
}
}
}
if cerr := req.Context().Err(); cerr != nil {
// deadline exceeded or canceled in stdlib, proxy client closed request
// see https://github.com/zalando/skipper/issues/687#issuecomment-405557503
return nil, &proxyError{err: cerr, code: 499}
}
p.log.Errorf("Unexpected error from Go stdlib net/http package during roundtrip: %v", err)
return nil, &proxyError{err: err}
}
p.tracing.setTag(ctx.proxySpan, HTTPStatusCodeTag, uint16(response.StatusCode))
return response, nil
}
// checkRatelimit is used in case of a route ratelimit
// configuration. It returns the used ratelimit.Settings and 0 if
// the request passed in the context should be allowed.
// Otherwise it returns the used ratelimit.Settings and the retry-after period.
func (p *Proxy) checkRatelimit(ctx *context) (ratelimit.Settings, int) {
if p.limiters == nil {
return ratelimit.Settings{}, 0
}
settings, ok := ctx.stateBag[ratelimitfilters.RouteSettingsKey].([]ratelimit.Settings)
if !ok || len(settings) < 1 {
return ratelimit.Settings{}, 0
}
for _, setting := range settings {
rl := p.limiters.Get(setting)
if rl == nil {
p.log.Errorf("RateLimiter is nil for setting: %s", setting)
continue
}
if setting.Lookuper == nil {
p.log.Errorf("Lookuper is nil for setting: %s", setting)
continue
}
s := setting.Lookuper.Lookup(ctx.Request())
if s == "" {
p.log.Debugf("Lookuper found no data in request for setting: %s and request: %v", setting, ctx.Request())
continue
}
if !rl.AllowContext(ctx.Request().Context(), s) {
return setting, rl.RetryAfter(s)
}
}
return ratelimit.Settings{}, 0
}
func (p *Proxy) checkBreaker(c *context) (func(bool), bool) {
if p.breakers == nil {
return nil, true
}
settings, _ := c.stateBag[circuitfilters.RouteSettingsKey].(circuit.BreakerSettings)
settings.Host = c.outgoingHost
b := p.breakers.Get(settings)
if b == nil {
return nil, true
}
done, ok := b.Allow()
return done, ok
}
func newRatelimitError(settings ratelimit.Settings, retryAfter int) error {
return &proxyError{
err: errRatelimit,
code: http.StatusTooManyRequests,
additionalHeader: http.Header{
ratelimit.Header: []string{strconv.Itoa(settings.MaxHits * int(time.Hour/settings.TimeWindow))},
ratelimit.RetryAfterHeader: []string{strconv.Itoa(retryAfter)},
},
}
}
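// Worked example (editorial note, not in the original source): the ratelimit
// header above normalizes the configured limit to hits per hour. For
// settings with MaxHits = 10 and TimeWindow = time.Minute the header value is
// 10 * int(time.Hour/time.Minute) = 10 * 60 = 600, while RetryAfterHeader
// carries the retry period (in seconds) reported by the limiter.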
func (p *Proxy) do(ctx *context) error {
if ctx.executionCounter > p.maxLoops {
return errMaxLoopbacksReached
}
defer func() {
pendingLIFO, _ := ctx.StateBag()[scheduler.LIFOKey].([]func())
for _, done := range pendingLIFO {
done()
}
}()
// proxy global setting
if !ctx.wasExecuted() {
if settings, retryAfter := p.limiters.Check(ctx.request); retryAfter > 0 {
rerr := newRatelimitError(settings, retryAfter)
return rerr
}
}
// every time the context is used for a request the context executionCounter is incremented
// a context executionCounter equal to zero represents a root context.
ctx.executionCounter++
lookupStart := time.Now()
route, params := p.lookupRoute(ctx)
p.metrics.MeasureRouteLookup(lookupStart)
if route == nil {
if !p.flags.Debug() {
p.metrics.IncRoutingFailures()
}
p.log.Debugf("could not find a route for %v", ctx.request.URL)
return errRouteLookupFailed
}
ctx.applyRoute(route, params, p.flags.PreserveHost())
processedFilters := p.applyFiltersToRequest(ctx.route.Filters, ctx)
// per route rate limit
if settings, retryAfter := p.checkRatelimit(ctx); retryAfter > 0 {
rerr := newRatelimitError(settings, retryAfter)
return rerr
}
if ctx.deprecatedShunted() {
p.log.Debugf("deprecated shunting detected in route: %s", ctx.route.Id)
return &proxyError{handled: true}
} else if ctx.shunted() || ctx.route.Shunt || ctx.route.BackendType == eskip.ShuntBackend {
// consume the body to prevent goroutine leaks
if ctx.request.Body != nil {
if _, err := io.Copy(ioutil.Discard, ctx.request.Body); err != nil {
p.log.Errorf("error while discarding remainder request body: %v.", err)
}
}
ctx.ensureDefaultResponse()
} else if ctx.route.BackendType == eskip.LoopBackend {
loopCTX := ctx.clone()
if err := p.do(loopCTX); err != nil {
return err
}
ctx.setResponse(loopCTX.response, p.flags.PreserveOriginal())
ctx.proxySpan = loopCTX.proxySpan
} else if p.flags.Debug() {
debugReq, err := mapRequest(ctx.request, ctx.route, ctx.outgoingHost, p.flags.HopHeadersRemoval(), ctx.StateBag())
if err != nil {
return &proxyError{err: err}
}
ctx.outgoingDebugRequest = debugReq
ctx.setResponse(&http.Response{Header: make(http.Header)}, p.flags.PreserveOriginal())
} else {
done, allow := p.checkBreaker(ctx)
if !allow {
tracing.LogKV("circuit_breaker", "open", ctx.request.Context())
return errCircuitBreakerOpen
}
backendStart := time.Now()
rsp, perr := p.makeBackendRequest(ctx)
if perr != nil {
if done != nil {
done(false)
}
p.metrics.IncErrorsBackend(ctx.route.Id)
if retryable(ctx.Request()) && perr.DialError() && ctx.route.BackendType == eskip.LBBackend {
if ctx.proxySpan != nil {
ctx.proxySpan.Finish()
ctx.proxySpan = nil
}
tracing.LogKV("retry", ctx.route.Id, ctx.Request().Context())
perr = nil
var perr2 *proxyError
rsp, perr2 = p.makeBackendRequest(ctx)
if perr2 != nil {
p.log.Errorf("Failed to do retry backend request: %v", perr2)
if perr2.code >= http.StatusInternalServerError {
p.metrics.MeasureBackend5xx(backendStart)
}
return perr2
}
} else {
return perr
}
}
if rsp.StatusCode >= http.StatusInternalServerError {
p.metrics.MeasureBackend5xx(backendStart)
}
if done != nil {
done(rsp.StatusCode < http.StatusInternalServerError)
}
ctx.setResponse(rsp, p.flags.PreserveOriginal())
p.metrics.MeasureBackend(ctx.route.Id, backendStart)
p.metrics.MeasureBackendHost(ctx.route.Host, backendStart)
}
addBranding(ctx.response.Header)
p.applyFiltersToResponse(processedFilters, ctx)
return nil
}
func retryable(req *http.Request) bool {
return req != nil && (req.Body == nil || req.Body == http.NoBody)
}
func (p *Proxy) serveResponse(ctx *context) {
if p.flags.Debug() {
dbgResponse(ctx.responseWriter, &debugInfo{
route: &ctx.route.Route,
incoming: ctx.originalRequest,
outgoing: ctx.outgoingDebugRequest,
response: ctx.response,
filterPanics: ctx.debugFilterPanics,
})
return
}
start := time.Now()
p.tracing.logStreamEvent(ctx.proxySpan, StreamHeadersEvent, StartEvent)
copyHeader(ctx.responseWriter.Header(), ctx.response.Header)
p.tracing.logStreamEvent(ctx.proxySpan, StreamHeadersEvent, EndEvent)
if err := ctx.Request().Context().Err(); err != nil {
// deadline exceeded or canceled in stdlib, client closed request
// see https://github.com/zalando/skipper/pull/864
p.log.Infof("Client request: %v", err)
ctx.response.StatusCode = 499
p.tracing.setTag(ctx.proxySpan, ClientRequestStateTag, ClientRequestCanceled)
}
ctx.responseWriter.WriteHeader(ctx.response.StatusCode)
ctx.responseWriter.Flush()
err := copyStream(ctx.responseWriter, ctx.response.Body, p.tracing, ctx.proxySpan)
if err != nil {
p.metrics.IncErrorsStreaming(ctx.route.Id)
p.log.Errorf("error while copying the response stream: %v", err)
p.tracing.setTag(ctx.proxySpan, ErrorTag, true)
p.tracing.setTag(ctx.proxySpan, StreamBodyEvent, StreamBodyError)
p.tracing.logStreamEvent(ctx.proxySpan, StreamBodyEvent, fmt.Sprintf("Failed to stream response: %v", err))
} else {
p.metrics.MeasureResponse(ctx.response.StatusCode, ctx.request.Method, ctx.route.Id, start)
}
}
func (p *Proxy) errorResponse(ctx *context, err error) {
perr, ok := err.(*proxyError)
if ok && perr.handled {
return
}
flowIdLog := ""
flowId := ctx.Request().Header.Get(flowidFilter.HeaderName)
if flowId != "" {
flowIdLog = fmt.Sprintf(", flow id %s", flowId)
}
id := unknownRouteID
backendType := unknownRouteBackendType
backend := unknownRouteBackend
if ctx.route != nil {
id = ctx.route.Id
backendType = ctx.route.BackendType.String()
backend = fmt.Sprintf("%s://%s", ctx.request.URL.Scheme, ctx.request.URL.Host)
}
code := http.StatusInternalServerError
if ok && perr.code != 0 {
if perr.code == -1 { // -1 == dial connection refused
code = http.StatusBadGateway
} else {
code = perr.code
}
}
if span := ot.SpanFromContext(ctx.Request().Context()); span != nil {
p.tracing.setTag(span, HTTPStatusCodeTag, uint16(code))
}
if p.flags.Debug() {
di := &debugInfo{
incoming: ctx.originalRequest,
outgoing: ctx.outgoingDebugRequest,
response: ctx.response,
err: err,
filterPanics: ctx.debugFilterPanics,
}
if ctx.route != nil {
di.route = &ctx.route.Route
}
dbgResponse(ctx.responseWriter, di)
return
}
if ok && len(perr.additionalHeader) > 0 {
copyHeader(ctx.responseWriter.Header(), perr.additionalHeader)
}
switch {
case err == errRouteLookupFailed:
code = p.defaultHTTPStatus
case ok && perr.err == errRatelimit:
code = perr.code
case code == 499:
req := ctx.Request()
remoteAddr := remoteHost(req)
uri := req.RequestURI
if i := strings.IndexRune(uri, '?'); i >= 0 {
uri = uri[:i]
}
p.log.Errorf(
`client canceled after %v, route %s with backend %s %s%s, status code %d: %v, remote host: %s, request: "%s %s %s", user agent: "%s"`,
time.Since(ctx.startServe),
id,
backendType,
backend,
flowIdLog,
code,
err,
remoteAddr,
req.Method,
uri,
req.Proto,
req.UserAgent(),
)
default:
req := ctx.Request()
remoteAddr := remoteHost(req)
uri := req.RequestURI
if i := strings.IndexRune(uri, '?'); i >= 0 {
uri = uri[:i]
}
p.log.Errorf(
`error while proxying after %v, route %s with backend %s %s%s, status code %d: %v, remote host: %s, request: "%s %s %s", user agent: "%s"`,
time.Since(ctx.startServe),
id,
backendType,
backend,
flowIdLog,
code,
err,
remoteAddr,
req.Method,
uri,
req.Proto,
req.UserAgent(),
)
}
p.sendError(ctx, id, code)
}
// strip port from addresses with hostname, ipv4 or ipv6
func stripPort(address string) string {
if h, _, err := net.SplitHostPort(address); err == nil {
return h
}
return address
}
// The remote address of the client. If the 'X-Forwarded-For'
// header is set, it is used instead.
func remoteAddr(r *http.Request) string {
ff := r.Header.Get("X-Forwarded-For")
if ff != "" {
return ff
}
return r.RemoteAddr
}
func remoteHost(r *http.Request) string {
a := remoteAddr(r)
return stripPort(a)
}
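// shouldLog decides whether an access log entry is written for the given
// status code. A one-digit prefix matches a whole status class (e.g. 5
// matches 500-599), a two-digit prefix matches a range of ten (e.g. 50
// matches 500-509), and a three-digit prefix matches exactly. The result is
// combined with the filter's Enable flag.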
func shouldLog(statusCode int, filter *al.AccessLogFilter) bool {
if len(filter.Prefixes) == 0 {
return filter.Enable
}
match := false
for _, prefix := range filter.Prefixes {
switch {
case prefix < 10:
match = (statusCode >= prefix*100 && statusCode < (prefix+1)*100)
case prefix < 100:
match = (statusCode >= prefix*10 && statusCode < (prefix+1)*10)
default:
match = statusCode == prefix
}
if match {
break
}
}
return match == filter.Enable
}
// http.Handler implementation
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
lw := logging.NewLoggingWriter(w)
p.metrics.IncCounter("incoming." + r.Proto)
var ctx *context
var span ot.Span
wireContext, err := p.tracing.tracer.Extract(ot.HTTPHeaders, ot.HTTPHeadersCarrier(r.Header))
if err == nil {
span = p.tracing.tracer.StartSpan(p.tracing.initialOperationName, ext.RPCServerOption(wireContext))
} else {
span = p.tracing.tracer.StartSpan(p.tracing.initialOperationName)
err = nil
}
defer func() {
if ctx != nil && ctx.proxySpan != nil {
ctx.proxySpan.Finish()
}
span.Finish()
}()
defer func() {
accessLogEnabled, ok := ctx.stateBag[al.AccessLogEnabledKey].(*al.AccessLogFilter)
if !ok {
if p.accessLogDisabled {
accessLogEnabled = &disabledAccessLog
} else {
accessLogEnabled = &enabledAccessLog
}
}
statusCode := lw.GetCode()
if shouldLog(statusCode, accessLogEnabled) {
entry := &logging.AccessEntry{
Request: r,
ResponseSize: lw.GetBytes(),
StatusCode: statusCode,
RequestTime: ctx.startServe,
Duration: time.Since(ctx.startServe),
}
additionalData, _ := ctx.stateBag[al.AccessLogAdditionalDataKey].(map[string]interface{})
logging.LogAccess(entry, additionalData)
}
// This flush is required in case of an I/O error
if ctx.successfulUpgrade {
lw.Flush()
}
}()
// Change /foo/../bar to /bar for matching and passing upstream
r.URL.Path = httppath.Clean(r.URL.Path)
if p.flags.patchPath() {
r.URL.Path = rfc.PatchPath(r.URL.Path, r.URL.RawPath)
}
p.tracing.setTag(span, SpanKindTag, SpanKindServer)
p.setCommonSpanInfo(r.URL, r, span)
r = r.WithContext(ot.ContextWithSpan(r.Context(), span))
ctx = newContext(lw, r, p)
ctx.startServe = time.Now()
ctx.tracer = p.tracing.tracer
defer func() {
if ctx.response != nil && ctx.response.Body != nil {
err := ctx.response.Body.Close()
if err != nil {
p.log.Errorf("error during closing the response body: %v", err)
}
}
}()
err = p.do(ctx)
if err != nil {
p.tracing.setTag(span, ErrorTag, true)
p.errorResponse(ctx, err)
return
}
p.serveResponse(ctx)
p.metrics.MeasureServe(
ctx.route.Id,
ctx.metricsHost(),
r.Method,
ctx.response.StatusCode,
ctx.startServe,
)
}
// Close causes the proxy to stop closing idle
// connections and, currently, has no other effect.
// Its primary purpose is to support testing.
func (p *Proxy) Close() error {
close(p.quit)
return nil
}
func (p *Proxy) setCommonSpanInfo(u *url.URL, r *http.Request, s ot.Span) {
p.tracing.
setTag(s, ComponentTag, "skipper").
setTag(s, HTTPUrlTag, u.String()).
setTag(s, HTTPMethodTag, r.Method).
setTag(s, HostnameTag, p.hostname).
setTag(s, HTTPRemoteAddrTag, r.RemoteAddr).
setTag(s, HTTPPathTag, u.Path).
setTag(s, HTTPHostTag, r.Host)
if val := r.Header.Get("X-Flow-Id"); val != "" {
p.tracing.setTag(s, FlowIDTag, val)
}
}
// TODO(sszuecs): copied from net.Client; we should refactor this to use net.Client
func injectClientTrace(req *http.Request, span ot.Span) *http.Request {
trace := &httptrace.ClientTrace{
DNSStart: func(httptrace.DNSStartInfo) {
span.LogKV("DNS", "start")
},
DNSDone: func(httptrace.DNSDoneInfo) {
span.LogKV("DNS", "end")
},
ConnectStart: func(string, string) {
span.LogKV("connect", "start")
},
ConnectDone: func(string, string, error) {
span.LogKV("connect", "end")
},
TLSHandshakeStart: func() {
span.LogKV("TLS", "start")
},
TLSHandshakeDone: func(tls.ConnectionState, error) {
span.LogKV("TLS", "end")
},
GetConn: func(string) {
span.LogKV("get_conn", "start")
},
GotConn: func(httptrace.GotConnInfo) {
span.LogKV("get_conn", "end")
},
WroteHeaders: func() {
span.LogKV("wrote_headers", "done")
},
WroteRequest: func(wri httptrace.WroteRequestInfo) {
if wri.Err != nil {
span.LogKV("wrote_request", wri.Err.Error())
} else {
span.LogKV("wrote_request", "done")
}
},
GotFirstResponseByte: func() {
span.LogKV("got_first_byte", "done")
},
}
return req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
}
|
[
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
test/functional/klusterlet_operator_suite_test.go
|
// +build functional
// (c) Copyright IBM Corporation 2019, 2020. All Rights Reserved.
// Note to U.S. Government Users Restricted Rights:
// U.S. Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule
// Contract with IBM Corp.
//
// Copyright (c) Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
package klusterlet_addon_controller_test
import (
"context"
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
)
const (
klusterletAddonController = "klusterlet-addon-controller"
defaultImageRegistry = "quay.io/stolostron"
testNamespace = "test-klusterlet-addon-controller"
klusterletAddonNamespace = "open-cluster-management"
)
var (
// useSha bool
// tagPostfix string
clientCluster kubernetes.Interface
clientClusterDynamic dynamic.Interface
gvrKlusterletAddonConfig schema.GroupVersionResource
gvrManifestwork schema.GroupVersionResource
gvrManagedCluster schema.GroupVersionResource
gvrManagedClusterAddOn schema.GroupVersionResource
gvrClusterManagementAddOn schema.GroupVersionResource
gvrLease schema.GroupVersionResource
kubeconfig string
imageRegistry string
)
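// newLease builds an unstructured coordination.k8s.io/v1 Lease with the
// given name, namespace and renew time.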
func newLease(name, namespace string, renewTime string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "coordination.k8s.io/v1",
"kind": "Lease",
"metadata": map[string]interface{}{
"name": name,
"namespace": namespace,
},
"spec": map[string]interface{}{
"leaseDurationSeconds": 60,
"renewTime": renewTime,
},
},
}
}
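// newManagedCluster builds an unstructured ManagedCluster that accepts the
// hub client and carries a placeholder status condition.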
func newManagedCluster(name, namespace string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "cluster.open-cluster-management.io/v1",
"kind": "ManagedCluster",
"metadata": map[string]interface{}{
"name": name,
"namespace": namespace,
},
"spec": map[string]interface{}{
"hubAcceptsClient": true,
},
"status": map[string]interface{}{
"conditions": []map[string]interface{}{
map[string]interface{}{
"type": "placeholder",
"lastTransitionTime": "2020-01-01T01:01:01Z",
"reason": "placeholder",
"status": "False",
},
},
},
},
}
}
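// newKlusterletAddonConfig builds an unstructured KlusterletAddonConfig with
// all addons enabled, pointing at the configured image registry.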
func newKlusterletAddonConfig(name, namespace, version string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "agent.open-cluster-management.io/v1",
"kind": "KlusterletAddonConfig",
"metadata": map[string]interface{}{
"name": name,
"namespace": namespace,
},
"spec": map[string]interface{}{
"applicationManager": map[string]interface{}{
"enabled": true,
},
"certPolicyController": map[string]interface{}{
"enabled": true,
},
"clusterLabels": map[string]interface{}{
"cloud": "auto-detect",
"vendor": "auto-detect",
},
"clusterName": "testCluster",
"clusterNamespace": "testCluster",
"imageRegistry": imageRegistry,
"imagePullSecret": "multicloud-image-pull-secret",
"policyController": map[string]interface{}{
"enabled": true,
},
"searchCollector": map[string]interface{}{
"enabled": true,
},
"iamPolicyController": map[string]interface{}{
"enabled": true,
},
"version": version,
},
},
}
}
// deleteIfExists deletes the resource identified by gvr, name and namespace, and waits for the deletion to complete using Eventually
func deleteIfExists(clientHubDynamic dynamic.Interface, gvr schema.GroupVersionResource, name, namespace string) {
ns := clientHubDynamic.Resource(gvr).Namespace(namespace)
if _, err := ns.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
Expect(errors.IsNotFound(err)).To(Equal(true))
return
}
Expect(func() error {
// possibly already got deleted
err := ns.Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
return nil
}()).To(BeNil())
// remove finalizers if needed
if obj, err := ns.Get(context.TODO(), name, metav1.GetOptions{}); err == nil {
if len(obj.GetFinalizers()) > 0 {
obj.SetFinalizers([]string{})
_, _ = ns.Update(context.TODO(), obj, metav1.UpdateOptions{})
}
}
klog.V(2).Info("Wait for deletion")
Eventually(func() error {
var err error
_, err = ns.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
if err == nil {
return fmt.Errorf("found object %s in namespace %s after deletion", name, namespace)
}
return nil
}, 10, 1).Should(BeNil())
}
// createNewUnstructured creates the resource described by gvr & obj and verifies that it can be retrieved
func createNewUnstructured(
clientClusterDynamic dynamic.Interface,
gvr schema.GroupVersionResource,
obj *unstructured.Unstructured,
name, namespace string,
) {
ns := clientClusterDynamic.Resource(gvr).Namespace(namespace)
Expect(ns.Create(context.TODO(), obj, metav1.CreateOptions{})).NotTo(BeNil())
Expect(ns.Get(context.TODO(), name, metav1.GetOptions{})).NotTo(BeNil())
}
func init() {
klog.SetOutput(GinkgoWriter)
klog.InitFlags(nil)
flag.StringVar(&kubeconfig, "kubeconfig", "", "Location of the kubeconfig to use; defaults to KUBECONFIG if not set")
flag.StringVar(&imageRegistry, "image-registry", defaultImageRegistry, fmt.Sprintf("URL of the image registry (i.e. %s)", defaultImageRegistry))
}
func TestKlusterletOperator(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "KlusterletOperator Suite")
}
var _ = BeforeSuite(func() {
klog.V(1).Info("running before suite")
By("Setup Kube client")
gvrKlusterletAddonConfig = schema.GroupVersionResource{Group: "agent.open-cluster-management.io", Version: "v1", Resource: "klusterletaddonconfigs"}
gvrManifestwork = schema.GroupVersionResource{Group: "work.open-cluster-management.io", Version: "v1", Resource: "manifestworks"}
gvrManagedCluster = schema.GroupVersionResource{Group: "cluster.open-cluster-management.io", Version: "v1", Resource: "managedclusters"}
gvrClusterManagementAddOn = schema.GroupVersionResource{Group: "addon.open-cluster-management.io", Version: "v1alpha1", Resource: "clustermanagementaddons"}
gvrManagedClusterAddOn = schema.GroupVersionResource{Group: "addon.open-cluster-management.io", Version: "v1alpha1", Resource: "managedclusteraddons"}
gvrLease = schema.GroupVersionResource{Group: "coordination.k8s.io", Version: "v1", Resource: "leases"}
clientCluster = NewKubeClient("", "", "")
clientClusterDynamic = NewKubeClientDynamic("", "", "")
By("Create Namesapce if needed")
namespaces := clientCluster.CoreV1().Namespaces()
if _, err := namespaces.Get(context.TODO(), testNamespace, metav1.GetOptions{}); err != nil && errors.IsNotFound(err) {
Expect(namespaces.Create(context.TODO(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNamespace,
},
}, metav1.CreateOptions{})).NotTo(BeNil())
}
d, err := clientCluster.AppsV1().Deployments(klusterletAddonNamespace).Get(context.TODO(), klusterletAddonController, metav1.GetOptions{})
if err != nil {
klog.V(1).Infof("klusterlet-addon-controller:\n%#v", d)
}
Expect(err).To(BeNil())
})
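// NewKubeClient creates a typed Kubernetes client for the given URL,
// kubeconfig path and context, panicking if the configuration cannot be
// loaded.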
func NewKubeClient(url, kubeconfig, context string) kubernetes.Interface {
klog.V(1).Infof("Create kubeclient for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, context)
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
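// NewKubeClientDynamic creates a dynamic Kubernetes client for the given
// URL, kubeconfig path and context, panicking on configuration errors.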
func NewKubeClientDynamic(url, kubeconfig, context string) dynamic.Interface {
klog.V(1).Infof("Create kubeclient dynamic for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, context)
if err != nil {
panic(err)
}
clientset, err := dynamic.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
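// LoadConfig resolves a rest.Config by trying, in order: the explicit
// kubeconfig path (or KUBECONFIG), the in-cluster configuration, and finally
// ~/.kube/config in the current user's home directory.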
func LoadConfig(url, kubeconfig, context string) (*rest.Config, error) {
if kubeconfig == "" {
kubeconfig = os.Getenv("KUBECONFIG")
}
klog.V(1).Infof("Kubeconfig path %s\n", kubeconfig)
// If we have an explicit indication of where the kubernetes config lives, read that.
if kubeconfig != "" {
if context == "" {
return clientcmd.BuildConfigFromFlags(url, kubeconfig)
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
&clientcmd.ConfigOverrides{
CurrentContext: context,
}).ClientConfig()
}
// If not, try the in-cluster config.
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
}
// If no in-cluster config, try the default location in the user's home directory.
if usr, err := user.Current(); err == nil {
klog.V(1).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", url, filepath.Join(usr.HomeDir, ".kube/config"))
if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube/config")); err == nil {
return c, nil
}
}
return nil, fmt.Errorf("could not create a valid kubeconfig")
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
python/interpret_text/__init__.py
|
# Set up logging infrastructure
import logging
import os
import atexit
_major = "0"
_minor = "1"
_patch = "3"
__name__ = "interpret-text"
__version__ = "{}.{}.{}.dev10".format(_major, _minor, _patch)
# Only log to disk if the environment variable is specified
interpret_text_logs = os.environ.get('INTERPRET_TEXT_LOGS')
if interpret_text_logs is not None:
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
os.makedirs(os.path.dirname(interpret_text_logs), exist_ok=True)
handler = logging.FileHandler(interpret_text_logs, mode='w')
handler.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info('Initializing logging file for interpret-text')
def close_handler():
handler.close()
logger.removeHandler(handler)
atexit.register(close_handler)
|
[] |
[] |
[
"INTERPRET_TEXT_LOGS"
] |
[]
|
["INTERPRET_TEXT_LOGS"]
|
python
| 1 | 0 | |
dev/dev.go
|
package main
import (
"context"
"encoding/json"
"fmt"
"math"
"os"
firebase "firebase.google.com/go"
game "github.com/TimothyGregg/formicid/game"
"github.com/TimothyGregg/formicid/game/util/uid"
rl "github.com/gen2brain/raylib-go/raylib"
"google.golang.org/api/option"
)
func main() {
for _, arg := range os.Args[1:] {
switch arg {
case "-s":
//go api
case "-l":
defer local_draw()
case "-j":
print_json()
case "-f":
firebase_dump()
}
}
}
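// firebase_dump creates a new 100x100 game and writes it to the Firebase
// Realtime Database under /games/<uid>, using the FIREBASE_URL environment
// variable and a local credentials file.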
func firebase_dump() {
conf := &firebase.Config{DatabaseURL: os.Getenv("FIREBASE_URL"), ProjectID: "formicid", StorageBucket: "formicid.appspot.com"}
opt := option.WithCredentialsFile("/home/tim/Desktop/formicid-firebase-pk.json")
app, err := firebase.NewApp(context.Background(), conf, opt)
if err != nil {
panic(fmt.Errorf("error initializing app: %v", err))
}
client, err := app.Database(context.Background())
if err != nil {
panic(err)
}
newgame := game.New_Game(uid.NewUID(0), 100, 100)
ref := client.NewRef("/games/" + fmt.Sprint(newgame.UID.Value()))
ref.Set(context.Background(), newgame)
}
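// print_json creates a 50x50 game and prints it as indented JSON.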
func print_json() {
g := game.New_Game(uid.NewUID(0), 50, 50)
data, err := json.MarshalIndent(g, "", "\t")
if err != nil {
fmt.Println(err)
}
fmt.Println(string(data))
}
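// local_draw creates a 50x50 game and renders its nodes and connections in a
// raylib window, updating the board every frame until the window is closed.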
func local_draw() {
g := game.New_Game(uid.NewUID(0), 50, 50)
size := g.Board.Size
fmt.Println(size)
border := 50
rl.InitWindow(int32(size[0]+border), int32(size[1]+border), "raylib [core] example - basic window")
rl.SetTargetFPS(60)
for !rl.WindowShouldClose() {
rl.BeginDrawing()
rl.ClearBackground(rl.RayWhite)
for _, node := range g.Board.Nodes {
x, y, r := node.X, node.Y, node.Radius
rl.DrawCircle(int32(x+int(border)/2), int32(y+int(border)/2), float32(r), rl.Lime)
rl.DrawText(fmt.Sprint(node.UID), int32(x+int(border)/2+5), int32(y+int(border)/2+5), 20, rl.Blue)
}
/* for _, path := range g.Board.Paths {
v1, v2 := path.Vertices()
x1, y1 := v1.Position()
x2, y2 := v2.Position()
rl.DrawLine(int32(x1+int(border)/2), int32(y1+int(border)/2), int32(x2+int(border)/2), int32(y2+int(border)/2), rl.Red)
} */
for node_uid, arr := range g.Board.NodeConnections {
for _, other := range arr {
node, err := g.Board.NodeByID(node_uid)
if err != nil {
os.Exit(1)
}
x2, y2 := other.X, other.Y
rl.DrawLine(int32(node.X+int(border)/2), int32(node.Y+int(border)/2), int32(x2+int(border)/2), int32(y2+int(border)/2), rl.Red)
}
}
rl.EndDrawing()
//start := time.Now()
g.Board.Update()
//fmt.Println(time.Since(start))
//fmt.Println(int(g.Board.Element.Timer()))
}
rl.CloseWindow()
fmt.Println("E/V^2 = " + fmt.Sprint(float64(len(g.Board.Paths))/math.Pow(float64(len(g.Board.Nodes)), 2.0)))
}
|
[
"\"FIREBASE_URL\""
] |
[] |
[
"FIREBASE_URL"
] |
[]
|
["FIREBASE_URL"]
|
go
| 1 | 0 | |
integration-cli/docker_cli_build_test.go
|
package main
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"time"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/pkg/testutil"
icmd "github.com/docker/docker/pkg/testutil/cmd"
"github.com/go-check/check"
)
func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(`
FROM busybox
RUN []
`))
}
func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
name := "testbuildshcmdjsonentrypoint"
expected := "/bin/sh -c echo test"
if testEnv.DaemonPlatform() == "windows" {
expected = "cmd /S /C echo test"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENTRYPOINT ["echo"]
CMD echo test
`))
out, _ := dockerCmd(c, "run", "--rm", name)
if strings.TrimSpace(out) != expected {
c.Fatalf("CMD did not contain %q : %q", expected, out)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
// Windows does not support FROM scratch or the USER command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV user foo
USER ${user}
`))
res := inspectFieldJSON(c, name, "Config.User")
if res != `"foo"` {
c.Fatal("User foo from environment not in Config.User on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
name := "testbuildenvironmentreplacement"
var volumePath string
if testEnv.DaemonPlatform() == "windows" {
volumePath = "c:/quux"
} else {
volumePath = "/quux"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
ENV volume `+volumePath+`
VOLUME ${volume}
`))
var volumes map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes)
if _, ok := volumes[volumePath]; !ok {
c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
// Windows does not support FROM scratch or the EXPOSE command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV port 80
EXPOSE ${port}
ENV ports " 99 100 "
EXPOSE ${ports}
`))
var exposedPorts map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts)
exp := []int{80, 99, 100}
for _, p := range exp {
tmp := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[tmp]; !ok {
c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV MYWORKDIR /work
RUN mkdir ${MYWORKDIR}
WORKDIR ${MYWORKDIR}
`))
res := inspectFieldJSON(c, name, "Config.WorkingDir")
expected := `"/work"`
if testEnv.DaemonPlatform() == "windows" {
expected = `"C:\\work"`
}
if res != expected {
c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM `+minimalBaseImage()+`
ENV baz foo
ENV quux bar
ENV dot .
ENV fee fff
ENV gee ggg
ADD ${baz} ${dot}
COPY ${quux} ${dot}
ADD ${zzz:-${fee}} ${dot}
COPY ${zzz:-${gee}} ${dot}
`),
withFile("foo", "test1"),
withFile("bar", "test2"),
withFile("fff", "test3"),
withFile("ggg", "test4"),
))
}
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
`))
envResult := []string{}
inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult)
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "zzz" {
c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "zzz" {
c.Fatalf("%s should be 'zzz' but instead its %q", parts[0], parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "foo" {
c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
}
}
}
if !found {
c.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
}
func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) {
// The volume paths used in this test are invalid on Windows
testRequires(c, DaemonIsLinux)
name := "testbuildhandleescapes"
testCases := []struct {
volumeValue string
expected string
}{
{
volumeValue: "${FOO}",
expected: "bar",
},
{
volumeValue: `\${FOO}`,
expected: "${FOO}",
},
// This test in particular provides *7* backslashes and expects 6 to come back.
// As above, the first escape is swallowed and the rest are treated as
// literals; this one is just less obvious because of all the character noise.
{
volumeValue: `\\\\\\\${FOO}`,
expected: `\\\${FOO}`,
},
}
for _, tc := range testCases {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`
FROM scratch
ENV FOO bar
VOLUME %s
`, tc.volumeValue)))
var result map[string]map[string]struct{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
if _, ok := result[tc.expected]; !ok {
c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result)
}
// Remove the image for the next iteration
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
name := "testbuildonbuildlowercase"
name2 := "testbuildonbuildlowercase2"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
onbuild run echo quux
`))
result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(`
FROM %s
`, name)))
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), "quux") {
c.Fatalf("Did not receive the expected echo text, got %s", result.Combined())
}
if strings.Contains(result.Combined(), "ONBUILD ONBUILD") {
c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvescapes"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo \$
`))
out, _ := dockerCmd(c, "run", "-t", name)
if strings.TrimSpace(out) != "$" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvoverwrite"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo ${TEST}
`))
out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)
if strings.TrimSpace(out) != "bar" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
// FIXME(vdemeester) why do we disable the cache here?
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
buildImageSuccessfully(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1)))
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatalf("did not get echo output from onbuild. Got: %q", out)
}
}
// FIXME(vdemeester) why do we disable the cache here?
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
buildImageSuccessfully(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1)))
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatal("got malformed output from onbuild", out)
}
}
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildtwoimageswithadd"
server := fakeStorage(c, map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
defer server.Close()
cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/robots.txt /`, server.URL())))
result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/index.html /`, server.URL())))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
// Temporary fix for #30890. TODO @jhowardmsft figure out what
// has changed in the master busybox image.
testRequires(c, DaemonIsLinux)
name := "testbuildlastmodified"
server := fakeStorage(c, map[string]string{
"file": "hello",
})
defer server.Close()
var out, out2 string
dFmt := `FROM busybox
ADD %s/file /`
dockerfile := fmt.Sprintf(dFmt, server.URL())
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
// Build it again and make sure the mtime of the file didn't change.
// Wait a few seconds to make sure the time changed enough to notice
time.Sleep(2 * time.Second)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
if out != out2 {
c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2)
}
// Now 'touch' the file and make sure the timestamp DID change this time
// Create a new fakeStorage instead of just using Add() to help windows
server = fakeStorage(c, map[string]string{
"file": "hello",
})
defer server.Close()
dockerfile = fmt.Sprintf(dFmt, server.URL())
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file")
if out == out2 {
c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2)
}
}
// Regression for https://github.com/docker/docker/pull/27805
// Makes sure that we don't use the cache if the contents of
// a file in a subfolder of the context are modified and we re-build.
func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) {
name := "testbuildmodifyfileinfolder"
ctx := fakeContext(c, `FROM busybox
RUN ["mkdir", "/test"]
ADD folder/file /test/changetarget`,
map[string]string{})
defer ctx.Close()
if err := ctx.Add("folder/file", "first"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("folder/file", "second"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("cache was used even though file contents in folder was changed")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddimg", withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
withFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs
func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
name := "testaddsinglefiletoworkdir"
ctx := fakeContext(c, `FROM busybox
ADD test_file .`,
map[string]string{
"test_file": "test1",
})
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, withExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddsinglefiletoexistdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
server := fakeStorage(c, map[string]string{
"robots.txt": "hello",
})
defer server.Close()
buildImageSuccessfully(c, "testcopymultiplefilestofile", withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file1 test_file2 /exists/
ADD test_file3 test_file4 %s/robots.txt /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
`, server.URL())),
withFile("test_file1", "test1"),
withFile("test_file2", "test2"),
withFile("test_file3", "test3"),
withFile("test_file3", "test3"),
withFile("test_file4", "test4")))
}
// These tests are mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) {
testRequires(c, DaemonIsLinux)
testCases := []string{
"ADD . /new_dir",
"COPY test_dir /new_dir",
"WORKDIR /new_dir",
}
name := "testbuildusernamespacevalidateremappedroot"
for _, tc := range testCases {
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
%s
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)),
withFile("test_dir/test_file", "test file")))
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testaddfilewithwhitespace"
for _, command := range []string{"ADD", "COPY"} {
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
%s [ "test file1", "/test_file1" ]
%s [ "test_file2", "/test file2" ]
%s [ "test file3", "/test file3" ]
%s [ "test dir/test_file4", "/test_dir/test_file4" ]
%s [ "test_dir/test_file5", "/test dir/test_file5" ]
%s [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)),
withFile("test file1", "test1"),
withFile("test_file2", "test2"),
withFile("test file3", "test3"),
withFile("test dir/test_file4", "test4"),
withFile("test_dir/test_file5", "test5"),
withFile("test dir/test_file6", "test6"),
))
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `FROM ` + testEnv.MinimalBaseImage() + `
RUN mkdir "C:/test dir"
RUN mkdir "C:/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN find "test1" "C:/test_file1"
RUN find "test2" "C:/test file2"
RUN find "test3" "C:/test file3"
RUN find "test4" "C:/test_dir/test_file4"
RUN find "test5" "C:/test dir/test_file5"
RUN find "test6" "C:/test dir/test_file6"`
name := "testcopyfilewithwhitespace"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("test file1", "test1"),
withFile("test_file2", "test2"),
withFile("test file3", "test3"),
withFile("test dir/test_file4", "test4"),
withFile("test_dir/test_file5", "test5"),
withFile("test dir/test_file6", "test6"),
))
}
func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
name := "testcopywildcard"
server := fakeStorage(c, map[string]string{
"robots.txt": "hello",
"index.html": "world",
})
defer server.Close()
ctx := fakeContext(c, fmt.Sprintf(`FROM busybox
COPY file*.txt /tmp/
RUN ls /tmp/file1.txt /tmp/file2.txt
RUN [ "mkdir", "/tmp1" ]
COPY dir* /tmp1/
RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file
RUN [ "mkdir", "/tmp2" ]
ADD dir/*dir %s/robots.txt /tmp2/
RUN ls /tmp2/nest_nest_file /tmp2/robots.txt
`, server.URL()),
map[string]string{
"file1.txt": "test1",
"file2.txt": "test2",
"dir/nested_file": "nested file",
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
// Run this only on Linux
// Below is the original comment (that I don't agree with — vdemeester)
// Normally we would do c.Fatal(err) here but given that
// the odds of this failing are so rare, it must be because
// the OS we're running the client on doesn't support * in
// filenames (like windows). So, instead of failing the test
// just let it pass. Then we don't need to explicitly
// say which OSs this works on or not.
testRequires(c, DaemonIsLinux, UnixCli)
buildImageSuccessfully(c, "testcopywildcardinname", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
COPY *.txt /tmp/
RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
`),
withFile("*.txt", "hi there"),
))
}
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx := fakeContext(c, `FROM busybox
COPY file1.txt /tmp/`,
map[string]string{
"file1.txt": "test1",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time even with wild cards.
// Use the same context so the file is the same and the checksum will match
ctx.Add("Dockerfile", `FROM busybox
COPY file*.txt /tmp/`)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoroot", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoexistingdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
withFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddwholedirtoroot", withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
withFile("test_dir/test_file", "test1")))
}
// Testing #5941: having an etc directory in the context conflicts with /etc/mtab
func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) {
buildImageSuccessfully(c, "testaddetctoroot", withBuildContext(c,
withFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD . /`),
withFile("etc/test_file", "test1")))
buildImageSuccessfully(c, "testcopyetctoroot", withBuildContext(c,
withFile("Dockerfile", `FROM `+minimalBaseImage()+`
COPY . /`),
withFile("etc/test_file", "test1")))
}
// Testing #9401: losing the setuid flag after an ADD
func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddetctoroot", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
ADD suidbin /usr/bin/suidbin
RUN chmod 4755 /usr/bin/suidbin
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]
ADD ./data/ /
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`),
withFile("suidbin", "suidbin"),
withFile("/data/usr/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoroot", withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
withFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs - adapted for COPY
func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
name := "testcopysinglefiletoworkdir"
ctx := fakeContext(c, `FROM busybox
COPY test_file .`,
map[string]string{
"test_file": "test1",
})
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, withExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoexistdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific
buildImageSuccessfully(c, "testcopysinglefiletononexistdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoroot", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
withFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoexistdir", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
withFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopywholedirtoroot", withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
withFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently working on Windows
dockerfile := `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile := "foo.txt"
var (
name = "test-link-absolute"
)
ctx := fakeContext(c, dockerfile, nil)
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
var symlinkTarget string
if runtime.GOOS == "windows" {
var driveLetter string
if abs, err := filepath.Abs(tempDir); err != nil {
c.Fatal(err)
} else {
driveLetter = abs[:1]
}
tempDirWithoutDrive := tempDir[2:]
symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive)
} else {
symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
}
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
c.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
c.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx := fakeContext(c, dockerfile, nil)
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows
{
name := "testbuildinaccessiblefiles"
ctx := fakeContext(c, "FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"})
defer ctx.Close()
// This is used to ensure we detect inaccessible files early during build in the cli client
pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown file to root: %s", err)
}
if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "no permission to read from ") {
c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined())
}
if !strings.Contains(result.Combined(), "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context")
}
}
{
name := "testbuildinaccessibledirectory"
ctx := fakeContext(c, "FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"})
defer ctx.Close()
// This is used to ensure we detect inaccessible directories early during build in the cli client
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "can't stat") {
c.Fatalf("output should've contained the string: can't access %s", result.Combined())
}
if !strings.Contains(result.Combined(), "Error checking context") {
c.Fatalf("output should've contained the string: Error checking context\ngot:%s", result.Combined())
}
}
{
name := "testlinksok"
ctx := fakeContext(c, "FROM scratch\nADD . /foo/", nil)
defer ctx.Close()
target := "../../../../../../../../../../../../../../../../../../../azA"
if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
c.Fatal(err)
}
defer os.Remove(target)
// This is used to ensure we don't follow links when checking if everything in the context is accessible
// This test doesn't require that we run commands as an unprivileged user
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
}
{
name := "testbuildignoredinaccessible"
ctx := fakeContext(c, "FROM scratch\nADD . /foo/",
map[string]string{
"directoryWeCantStat/bar": "foo",
".dockerignore": "directoryWeCantStat",
})
defer ctx.Close()
// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Dir: ctx.Dir,
Command: []string{"su", "unprivilegeduser", "-c",
fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
})
result.Assert(c, icmd.Expected{})
}
}
func (s *DockerSuite) TestBuildForceRm(c *check.C) {
containerCountBefore := getContainerCount(c)
name := "testbuildforcerm"
buildImage(name, cli.WithFlags("--force-rm"), withBuildContext(c,
withFile("Dockerfile", `FROM `+minimalBaseImage()+`
RUN true
RUN thiswillfail`))).Assert(c, icmd.Expected{
ExitCode: 1,
})
containerCountAfter := getContainerCount(c)
if containerCountBefore != containerCountAfter {
c.Fatalf("--force-rm shouldn't have left containers behind")
}
}
func (s *DockerSuite) TestBuildRm(c *check.C) {
name := "testbuildrm"
testCases := []struct {
buildflags []string
shouldLeftContainerBehind bool
}{
// Default case (i.e. --rm=true)
{
buildflags: []string{},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm"},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm=false"},
shouldLeftContainerBehind: true,
},
}
for _, tc := range testCases {
containerCountBefore := getContainerCount(c)
buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox
RUN echo hello world`))
containerCountAfter := getContainerCount(c)
if tc.shouldLeftContainerBehind {
if containerCountBefore == containerCountAfter {
c.Fatalf("flags %v should have left containers behind", tc.buildflags)
}
} else {
if containerCountBefore != containerCountAfter {
c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags)
}
}
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows
var (
result map[string]map[string]struct{}
name = "testbuildvolumes"
emptyMap = make(map[string]struct{})
expected = map[string]map[string]struct{}{
"/test1": emptyMap,
"/test2": emptyMap,
"/test3": emptyMap,
"/test4": emptyMap,
"/test5": emptyMap,
"/test6": emptyMap,
"[/test7": emptyMap,
"/test8]": emptyMap,
}
)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
VOLUME /test1
VOLUME /test2
VOLUME /test3 /test4
VOLUME ["/test5", "/test6"]
VOLUME [/test7 /test8]
`))
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
equal := reflect.DeepEqual(&result, &expected)
if !equal {
c.Fatalf("Volumes %s, expected %s", result, expected)
}
}
func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
name := "testbuildmaintainer"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio`))
expected := "dockerio"
res := inspectField(c, name, "Author")
if res != expected {
c.Fatalf("Maintainer %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
expected := "dockerio"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio
RUN [ $(whoami) = 'dockerio' ]`))
res := inspectField(c, name, "Config.User")
if res != expected {
c.Fatalf("User %s, expected %s", res, expected)
}
}
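// TestBuildRelativeWorkdir checks that relative and absolute WORKDIR instructions compose as
// expected during the build and that the final value is recorded in Config.WorkingDir.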
func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
name := "testbuildrelativeworkdir"
var (
expected1 string
expected2 string
expected3 string
expected4 string
expectedFinal string
)
if testEnv.DaemonPlatform() == "windows" {
expected1 = `C:/`
expected2 = `C:/test1`
expected3 = `C:/test2`
expected4 = `C:/test2/test3`
expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox
} else {
expected1 = `/`
expected2 = `/test1`
expected3 = `/test2`
expected4 = `/test2/test3`
expectedFinal = `/test2/test3`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c "[ "$PWD" = "`+expected1+`" ]"
WORKDIR test1
RUN sh -c "[ "$PWD" = "`+expected2+`" ]"
WORKDIR /test2
RUN sh -c "[ "$PWD" = "`+expected3+`" ]"
WORKDIR test3
RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expectedFinal {
c.Fatalf("Workdir %s, expected %s", res, expectedFinal)
}
}
// #22181 Regression test. A single end-to-end test using
// Windows semantics. Most path handling verifications are in unit tests
func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox
WORKDIR C:\\foo
WORKDIR bar
RUN sh -c "[ "$PWD" = "C:/foo/bar" ]"
`))
}
// #22181 Regression test. Most path handling verifications are in unit tests.
// This is a single functional end-to-end test.
func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
// TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to
// support backslash such as .\\ being equivalent to ./ and c:\\ being
// equivalent to c:/. This is not currently (nor ever has been) supported
// by docker on the Windows platform.
buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
# No trailing slash on COPY/ADD
# Results in dir being changed to a file
WORKDIR /wc1
COPY wc1 c:/wc1
WORKDIR /wc2
ADD wc2 c:/wc2
WORKDIR c:/
RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]"
RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]"
# Trailing slash on COPY/ADD, Windows-style path.
WORKDIR /wd1
COPY wd1 c:/wd1/
WORKDIR /wd2
ADD wd2 c:/wd2/
RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]"
RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]"
`),
withFile("wc1", "hellowc1"),
withFile("wc2", "worldwc2"),
withFile("wd1", "hellowd1"),
withFile("wd2", "worldwd2"),
))
}
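// TestBuildWorkdirWithEnvVariables checks that WORKDIR expands ENV variables and that an
// undefined variable expands to the empty string.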
func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
name := "testbuildworkdirwithenvvariables"
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:\test1\test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV DIRPATH /test1
ENV SUBDIRNAME test2
WORKDIR $DIRPATH
WORKDIR $SUBDIRNAME/$MISSING_VAR`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expected {
c.Fatalf("Workdir %s, expected %s", res, expected)
}
}
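// TestBuildRelativeCopy checks COPY/ADD with relative destinations, parent-directory
// destinations, and absolute destinations outside the current WORKDIR.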
func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
// cat /test1/test2/foo gets permission denied for the user
testRequires(c, NotUserNamespace)
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:/test1/test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, "testbuildrelativecopy", withBuildContext(c,
withFile("Dockerfile", `FROM busybox
WORKDIR /test1
WORKDIR test2
RUN sh -c "[ "$PWD" = '`+expected+`' ]"
COPY foo ./
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
ADD foo ./bar/baz
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
COPY foo ./bar/baz2
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
WORKDIR ..
COPY foo ./
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
COPY foo /test3/
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
WORKDIR /test4
COPY . .
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
WORKDIR /test5/test6
COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`),
withFile("foo", "hello"),
))
}
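// TestBuildBlankName checks that ENV, LABEL, and ARG instructions with a blank name fail the
// build with a descriptive error.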
func (s *DockerSuite) TestBuildBlankName(c *check.C) {
name := "testbuildblankname"
testCases := []struct {
expression string
expectedStderr string
}{
{
expression: "ENV =",
expectedStderr: "ENV names can not be blank",
},
{
expression: "LABEL =",
expectedStderr: "LABEL names can not be blank",
},
{
expression: "ARG =foo",
expectedStderr: "ARG names can not be blank",
},
}
for _, tc := range testCases {
buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox
%s`, tc.expression))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: tc.expectedStderr,
})
}
}
func (s *DockerSuite) TestBuildEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
name := "testbuildenv"
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV PATH /test:$PATH
ENV PORT 2375
RUN [ $(env | grep PORT) = 'PORT=2375' ]`))
res := inspectField(c, name, "Config.Env")
if res != expected {
c.Fatalf("Env %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildPATH(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
fn := func(dockerfile string, expected string) {
buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile))
res := inspectField(c, "testbldpath", "Config.Env")
if res != expected {
c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile)
}
}
tests := []struct{ dockerfile, exp string }{
{"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM scratch\nENV PATH=/test", "[PATH=/test]"},
{"FROM busybox\nENV PATH=/test", "[PATH=/test]"},
{"FROM scratch\nENV PATH=''", "[PATH=]"},
{"FROM busybox\nENV PATH=''", "[PATH=]"},
}
for _, test := range tests {
fn(test.dockerfile, test.exp)
}
}
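// TestBuildContextCleanup checks that the temporary build context is removed from the daemon's
// tmp directory after a successful build.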
func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
RUN /non/existing/command`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildCmd(c *check.C) {
name := "testbuildcmd"
expected := "[/bin/echo Hello World]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["/bin/echo", "Hello World"]`))
res := inspectField(c, name, "Config.Cmd")
if res != expected {
c.Fatalf("Cmd %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExpose(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexpose"
expected := "map[2375/tcp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
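// TestBuildExposeMorePorts generates a Dockerfile exposing 5000 ports across continued EXPOSE
// lines and verifies that every one of them shows up in Config.ExposedPorts.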
func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
// Start building a Dockerfile with a large number of ports
portList := make([]string, 50)
line := make([]string, 100)
expectedPorts := make([]int, len(portList)*len(line))
for i := 0; i < len(portList); i++ {
for j := 0; j < len(line); j++ {
p := i*len(line) + j + 1
line[j] = strconv.Itoa(p)
expectedPorts[p-1] = p
}
if i == len(portList)-1 {
portList[i] = strings.Join(line, " ")
} else {
portList[i] = strings.Join(line, " ") + ` \`
}
}
dockerfile := `FROM scratch
EXPOSE {{range .}} {{.}}
{{end}}`
tmpl := template.Must(template.New("dockerfile").Parse(dockerfile))
buf := bytes.NewBuffer(nil)
tmpl.Execute(buf, portList)
name := "testbuildexpose"
buildImageSuccessfully(c, name, build.WithDockerfile(buf.String()))
// Check that all the ports are saved inside Config.ExposedPorts
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
for _, p := range expectedPorts {
ep := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[ep]; !ok {
c.Errorf("Port(%s) is not exposed", ep)
} else {
delete(exposedPorts, ep)
}
}
if len(exposedPorts) != 0 {
c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
}
}
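// TestBuildExposeOrder checks that EXPOSE with the same ports in a different order still hits
// the build cache (both builds produce the same image ID).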
func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
buildID := func(name, exposed string) string {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
EXPOSE %s`, exposed)))
id := inspectField(c, name, "Id")
return id
}
id1 := buildID("testbuildexpose1", "80 2375")
id2 := buildID("testbuildexpose2", "2375 80")
if id1 != id2 {
c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
}
}
func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexposeuppercaseproto"
expected := "map[5678/udp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 5678/UDP`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
name := "testbuildentrypointinheritance"
name2 := "testbuildentrypointinheritance2"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
expected := "[/bin/echo]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT []`, name)))
res = inspectField(c, name2, "Config.Entrypoint")
expected = "[]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT []`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[/bin/echo]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) {
buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(`
FROM busybox
RUN echo "GRANDPARENT"
ONBUILD RUN echo "ONBUILD PARENT"
`))
// ONBUILD should be run in second build.
buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{
Out: "ONBUILD PARENT",
})
// ONBUILD should *not* be run in third build.
result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2"))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
}
}
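// TestBuildSameDockerfileWithAndWithoutCache builds the same Dockerfile twice (expecting a cache
// hit) and then once with --no-cache (expecting a different image ID).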
func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithcache"
dockerfile := `FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
// Make sure that ADD/COPY still populate the cache even if they don't use it
func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
name := "testbuildconditionalcache"
dockerfile := `
FROM busybox
ADD foo /tmp/`
ctx := fakeContext(c, dockerfile, map[string]string{
"foo": "hello",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("foo", "bye"); err != nil {
c.Fatalf("Error modifying foo: %s", err)
}
// Updating a file should invalidate the cache
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id2 == id1 {
c.Fatal("Should not have used the cache")
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id3 != id2 {
c.Fatal("Should have used the cache")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before
func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
dockerfile := `
FROM busybox
MAINTAINER dockerio
ADD foo Dockerfile /usr/lib/bla/
RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
ctx := fakeContext(c, dockerfile, map[string]string{
"foo": "hello",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
name := "testbuildcopydirbutnotfile"
name2 := "testbuildcopydirbutnotfile2"
dockerfile := `
FROM ` + minimalBaseImage() + `
COPY dir /tmp/`
ctx := fakeContext(c, dockerfile, map[string]string{
"dir/foo": "hello",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Check that adding a file with a similar name doesn't mess with the cache
if err := ctx.Add("dir_file", "hello2"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name2, withExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't")
}
}
func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
name4 := name + "4"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakeContext(c, dockerfile, map[string]string{
"foo": "hello",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Check that adding a file invalidates the cache of "ADD ."
if err := ctx.Add("bar", "hello2"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name2, withExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing a file invalidates the cache of "ADD ."
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name3, withExternalBuildContext(ctx))
id3 := getIDByName(c, name3)
if id2 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing a file to the same content with a different mtime does not
// invalidate the cache of "ADD ."
time.Sleep(1 * time.Second) // wait second because of mtime precision
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name4, withExternalBuildContext(ctx))
id4 := getIDByName(c, name4)
if id3 != id4 {
c.Fatal("The cache should have been used but hasn't.")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache)
func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakeContext(c, dockerfile, map[string]string{
"foo": "hello",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
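// TestBuildAddRemoteFileWithAndWithoutCache checks that ADD of a remote URL is cached across
// identical builds and invalidated by --no-cache.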
func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddremotefilewithcache"
server := fakeStorage(c, map[string]string{
"baz": "hello",
})
defer server.Close()
dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL())
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
files := map[string]string{"baz": "hello"}
server := fakeStorage(c, files)
defer server.Close()
ctx := fakeContext(c, fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil)
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name2, withExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't - #1")
}
// Now create a different server with the same contents (causes a different mtime).
// The cache should still be used.
// Allow some time for the clock to pass, as mtime precision is only 1s.
time.Sleep(2 * time.Second)
server2 := fakeStorage(c, files)
defer server2.Close()
ctx2 := fakeContext(c, fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil)
defer ctx2.Close()
buildImageSuccessfully(c, name3, withExternalBuildContext(ctx2))
id3 := getIDByName(c, name3)
if id1 != id3 {
c.Fatal("The cache should have been used but wasn't")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (combined)
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithcache"
server := fakeStorage(c, map[string]string{
"baz": "hello",
})
defer server.Close()
ctx := fakeContext(c, fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL()),
map[string]string{
"foo": "hello world",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalidated but hasn't.")
}
}
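// testContextTar tars up a small build context with the given compression and pipes it to
// "docker build -" on stdin.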
func testContextTar(c *check.C, compression archive.Compression) {
ctx := fakeContext(c,
`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`,
map[string]string{
"foo": "bar",
},
)
defer ctx.Close()
context, err := archive.Tar(ctx.Dir, compression)
if err != nil {
c.Fatalf("failed to build context tar: %v", err)
}
name := "contexttar"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-"},
Stdin: context,
}).Assert(c, icmd.Success)
}
func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
testContextTar(c, archive.Gzip)
}
func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
testContextTar(c, archive.Uncompressed)
}
func (s *DockerSuite) TestBuildNoContext(c *check.C) {
name := "nocontext"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-"},
Stdin: strings.NewReader(
`FROM busybox
CMD ["echo", "ok"]`),
}).Assert(c, icmd.Success)
if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
}
}
func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildimg"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest
RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
VOLUME /test`))
out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
if expected := "drw-------"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
if expected := "daemon daemon"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
}
// Testing #1405 - config.Cmd does not get cleaned up when
// utilizing the cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
name := "testbuildcmdcleanup"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo "hello"`))
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `FROM busybox
RUN echo "hello"
ADD foo /foo
ENTRYPOINT ["/bin/echo"]`),
withFile("foo", "hello")))
res := inspectField(c, name, "Config.Cmd")
// Cmd must be cleaned up
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
}
func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
expected := "foo: no such file or directory"
if testEnv.DaemonPlatform() == "windows" {
expected = "foo: The system cannot find the file specified"
}
buildImage(name, withBuildContext(c,
withFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD foo /usr/local/bar`),
withFile("bar", "hello"))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: expected,
})
}
func (s *DockerSuite) TestBuildInheritance(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildinheritance"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
ports1 := inspectField(c, name, "Config.ExposedPorts")
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["/bin/echo"]`, name)))
res := inspectField(c, name, "Config.Entrypoint")
if expected := "[/bin/echo]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
ports2 := inspectField(c, name, "Config.ExposedPorts")
if ports1 != ports2 {
c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
}
}
func (s *DockerSuite) TestBuildFails(c *check.C) {
name := "testbuildfails"
buildImage(name, build.WithDockerfile(`FROM busybox
RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{
ExitCode: 23,
Err: "returned a non-zero code: 23",
})
}
func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
name := "testbuildonbuild"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ONBUILD RUN touch foobar`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
RUN [ -f foobar ]`, name)))
}
// gh #2446
func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
makeLink := `ln -s /foo /bar`
if testEnv.DaemonPlatform() == "windows" {
makeLink = `mklink /D C:\bar C:\foo`
}
name := "testbuildaddtosymlinkdest"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
RUN sh -c "mkdir /foo"
RUN `+makeLink+`
ADD foo /bar/
RUN sh -c "[ -f /bar/foo ]"
RUN sh -c "[ -f /foo/foo ]"`),
withFile("foo", "hello"),
))
}
func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
name := "testbuildescapewhitespace"
buildImageSuccessfully(c, name, build.WithDockerfile(`
# ESCAPE=\
FROM busybox
MAINTAINER "Docker \
IO <io@\
docker.com>"
`))
res := inspectField(c, name, "Author")
if res != "\"Docker IO <[email protected]>\"" {
c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
}
}
func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
// Verify that strings that look like ints are still passed as strings
name := "testbuildstringing"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
MAINTAINER 123`))
out, _ := dockerCmd(c, "inspect", name)
if !strings.Contains(out, "\"123\"") {
c.Fatalf("Output does not contain the int as a string:\n%s", out)
}
}
func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
name := "testbuilddockerignore"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ ! -e /bla/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ ! -e v.cc ]]"
RUN sh -c "[[ ! -e src/v.cc ]]"
RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`),
withFile("Makefile", "all:"),
withFile(".git/HEAD", "ref: foo"),
withFile("src/x.go", "package main"),
withFile("src/_vendor/v.go", "package main"),
withFile("src/_vendor/v.cc", "package main"),
withFile("src/v.cc", "package main"),
withFile("v.cc", "package main"),
withFile("dir/foo", ""),
withFile(".gitignore", ""),
withFile("README.md", "readme"),
withFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
**/*.cc
dir`),
))
}
func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
name := "testbuilddockerignorecleanpaths"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD . /tmp/
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`),
withFile("foo", "foo"),
withFile("foo2", "foo2"),
withFile("dir1/foo", "foo in dir1"),
withFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"),
))
}
func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
name := "testbuilddockerignoreexceptions"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ -e /bla/dir/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/dir/foo1 ]]"
RUN sh -c "[[ -f /bla/dir/e ]]"
RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ -e /bla/dir/a.cc ]]"`),
withFile("Makefile", "all:"),
withFile(".git/HEAD", "ref: foo"),
withFile("src/x.go", "package main"),
withFile("src/_vendor/v.go", "package main"),
withFile("dir/foo", ""),
withFile("dir/foo1", ""),
withFile("dir/dir/f1", ""),
withFile("dir/dir/foo", ""),
withFile("dir/e", ""),
withFile("dir/e-dir/foo", ""),
withFile(".gitignore", ""),
withFile("README.md", "readme"),
withFile("dir/a.cc", "hello"),
withFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
dir
!dir/e*
!dir/dir/foo
**/*.cc
!**/*.cc`),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", "Dockerfile\n"),
))
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", "./Dockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls /tmp/Dockerfile
RUN sh -c "! ls /tmp/MyDockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), withBuildContext(c,
withFile("Dockerfile", "Should not use me"),
withFile("MyDockerfile", dockerfile),
withFile(".dockerignore", "MyDockerfile\n"),
))
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), withBuildContext(c,
withFile("Dockerfile", "Should not use me"),
withFile("MyDockerfile", dockerfile),
withFile(".dockerignore", "./MyDockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
name := "testbuilddockerignoredockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/.dockerignore"
RUN ls /tmp/Dockerfile`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", ".dockerignore\n"),
))
}
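// TestBuildDockerignoreTouchDockerfile checks that modifying a Dockerfile that is itself listed
// in .dockerignore does not invalidate the build cache.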
func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
name := "testbuilddockerignoretouchdockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
".dockerignore": "Dockerfile\n",
})
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 1")
}
// Now make sure touching Dockerfile doesn't invalidate the cache
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 2")
}
// One more time but just 'touch' it instead of changing the content
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 3")
}
}
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ ! -e /Makefile ]]"`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", "*\n"),
withFile("Makefile", "all:"),
withFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", ".*"),
withFile("Makefile", "all:"),
withFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
name := "testbuilddockerignorebadexclusion"
buildImage(name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`),
withFile("Makefile", "all:"),
withFile(".gitignore", ""),
withFile(".dockerignore", "!\n"),
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Error checking context: 'illegal exclusion pattern: \"!\"",
})
}
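// TestBuildDockerignoringWildTopDir checks that top-level wildcard patterns in .dockerignore
// exclude the entire build context.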
func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.dockerignore ]]"
RUN sh -c "[[ ! -e /Dockerfile ]]"
RUN sh -c "[[ ! -e /file1 ]]"
RUN sh -c "[[ ! -e /dir ]]"`
// All of these should result in ignoring all files
for _, variant := range []string{"**", "**/", "**/**", "*"} {
buildImageSuccessfully(c, "noname", withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("file1", ""),
withFile("dir/file1", ""),
withFile(".dockerignore", variant),
))
dockerCmd(c, "rmi", "noname")
}
}
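// TestBuildDockerignoringWildDirs exercises a variety of "**" and "?" patterns in .dockerignore
// against a nested directory tree.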
func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
#RUN sh -c "[[ -e /.dockerignore ]]"
RUN sh -c "[[ -e /Dockerfile ]] && \
[[ ! -e /file0 ]] && \
[[ ! -e /dir1/file0 ]] && \
[[ ! -e /dir2/file0 ]] && \
[[ ! -e /file1 ]] && \
[[ ! -e /dir1/file1 ]] && \
[[ ! -e /dir1/dir2/file1 ]] && \
[[ ! -e /dir1/file2 ]] && \
[[ -e /dir1/dir2/file2 ]] && \
[[ ! -e /dir1/dir2/file4 ]] && \
[[ ! -e /dir1/dir2/file5 ]] && \
[[ ! -e /dir1/dir2/file6 ]] && \
[[ ! -e /dir1/dir3/file7 ]] && \
[[ ! -e /dir1/dir3/file8 ]] && \
[[ -e /dir1/dir3 ]] && \
[[ -e /dir1/dir4 ]] && \
[[ ! -e 'dir1/dir5/fileAA' ]] && \
[[ -e 'dir1/dir5/fileAB' ]] && \
[[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing
RUN echo all done!`
dockerignore := `
**/file0
**/*file1
**/dir1/file2
dir1/**/file4
**/dir2/file5
**/dir1/dir2/file6
dir1/dir3/**
**/dir4/**
**/file?A
**/file\?B
**/dir5/file.
`
buildImageSuccessfully(c, "noname", withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", dockerignore),
withFile("dir1/file0", ""),
withFile("dir1/dir2/file0", ""),
withFile("file1", ""),
withFile("dir1/file1", ""),
withFile("dir1/dir2/file1", ""),
withFile("dir1/file2", ""),
withFile("dir1/dir2/file2", ""), // remains
withFile("dir1/dir2/file4", ""),
withFile("dir1/dir2/file5", ""),
withFile("dir1/dir2/file6", ""),
withFile("dir1/dir3/file7", ""),
withFile("dir1/dir3/file8", ""),
withFile("dir1/dir4/file9", ""),
withFile("dir1/dir5/fileAA", ""),
withFile("dir1/dir5/fileAB", ""),
withFile("dir1/dir5/fileB", ""),
))
}
func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildlinebreak"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildeolinline"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass > /tmp/passwd'
RUN echo "foo \n bar"; echo "baz"
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildcomments"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
# comment with line break \
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]`))
}
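// TestBuildUsersAndGroups exercises the USER instruction with names, numeric IDs, and
// user:group combinations, checking the id output at each step.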
func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildusers"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# Make sure our defaults work
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
USER root
RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
# Setup dockerio user and group
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \
echo 'dockerio:x:1001:' >> /etc/group
# Make sure we can switch to our user and all the information is exactly as we expect it to be
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
# Switch back to root and double check that worked exactly as we might expect it to
USER root
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \
# Add a "supplementary" group for our dockerio user
echo 'supplementary:x:1002:dockerio' >> /etc/group
# ... and then go verify that we get it like we expect
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
USER 1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
# super test the new "user:group" syntax
USER dockerio:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER dockerio:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
# make sure unknown uid/gid still works properly
USER 1042:1043
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2)
func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage"
dockerfile := `FROM busybox
ENV HOME /root
ENV PATH $HOME/bin:$PATH
ENV PATH /tmp:$PATH
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
ENV FOO /foo/baz
ENV BAR /bar
ENV BAZ $BAR
ENV FOOPATH $PATH:$FOO
RUN [ "$BAR" = "$BAZ" ]
RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
ENV FROM hello/docker/world
ENV TO /docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc=def
ENV ghi=$abc
RUN [ "$ghi" = "def" ]
`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("hello/docker/world", "hello"),
))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage)
func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage2"
dockerfile := `FROM busybox
ENV abc=def def="hello world"
RUN [ "$abc,$def" = "def,hello world" ]
ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too"
RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$abc,$(cat $TO)" = "zzz,hello" ]
ENV abc 'yyy'
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
# use grep to make sure that if the builder substitutes \$foo by mistake
# we don't get a false positive
ENV abc=\$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc \$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc=\'foo\' abc2=\"foo\"
RUN [ "$abc,$abc2" = "'foo',\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = '"foo"' ]
ENV abc=ABC
RUN [ "$abc" = "ABC" ]
ENV def1=${abc:-DEF} def2=${ccc:-DEF}
ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:}
RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ]
ENV mypath=${mypath:+$mypath:}/home
ENV mypath=${mypath:+$mypath:}/away
RUN [ "$mypath" = '/home:/away' ]
ENV e1=bar
ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo" eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("hello/docker/world", "hello"),
))
}
func (s *DockerSuite) TestBuildAddScript(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddscript"
dockerfile := `
FROM busybox
ADD test /test
RUN ["chmod","+x","/test"]
RUN ["/test"]
RUN [ "$(cat /testfile)" = 'test!' ]`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("test", "#!/bin/sh\necho 'test!' > /testfile"),
))
}
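// TestBuildAddTar checks that ADD auto-extracts a tar archive into existing and non-existing
// destinations, with and without a trailing slash.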
func (s *DockerSuite) TestBuildAddTar(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddtar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /
RUN cat /test/foo | grep Hi
ADD test.tar /test.tar
RUN cat /test.tar/test/foo | grep Hi
ADD test.tar /unlikely-to-exist
RUN cat /unlikely-to-exist/test/foo | grep Hi
ADD test.tar /unlikely-to-exist-trailing-slash/
RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir
ADD test.tar /existing-directory
RUN cat /existing-directory/test/foo | grep Hi
ADD test.tar /existing-directory-trailing-slash/
RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) {
name := "testbuildaddbrokentar"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar /`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
// Corrupt the tar by removing one byte off the end
stat, err := testTar.Stat()
if err != nil {
c.Fatalf("failed to stat tar archive: %v", err)
}
if err := testTar.Truncate(stat.Size() - 1); err != nil {
c.Fatalf("failed to truncate tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
buildImage(name, withExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildAddNonTar(c *check.C) {
name := "testbuildaddnontar"
// Should not try to extract test.tar
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD test.tar /
RUN test -f /test.tar`),
withFile("test.tar", "not_a_tar_file"),
))
}
func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxzgz"
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
icmd.RunCmd(icmd.Cmd{
Command: []string{"gzip", "test.tar.xz"},
Dir: tmpDir,
})
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakeContextFromDir(tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildFromGit(c *check.C) {
name := "testbuildfromgit"
git := newFakeGit(c, "repo", map[string]string{
"Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) {
name := "testbuildfromgit"
git := newFakeGit(c, "repo", map[string]string{
"docker/Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"docker/first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL)))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) {
name := "testbuildfromgitwithf"
git := newFakeGit(c, "repo", map[string]string{
"myApp/myDockerfile": `FROM busybox
RUN echo hi from Dockerfile`,
}, true)
defer git.Close()
buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{
Out: "hi from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
name := "testbuildfromremotetarball"
buffer := new(bytes.Buffer)
tw := tar.NewWriter(buffer)
defer tw.Close()
dockerfile := []byte(`FROM busybox
MAINTAINER docker`)
if err := tw.WriteHeader(&tar.Header{
Name: "Dockerfile",
Size: int64(len(dockerfile)),
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write(dockerfile); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
server := fakeBinaryStorage(c, map[string]*bytes.Buffer{
"testT.tar": buffer,
})
defer server.Close()
buildImageSuccessfully(c, name, build.WithContextPath(server.URL()+"/testT.tar"))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
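// TestBuildCleanupCmdOnEntrypoint checks that redefining ENTRYPOINT in a child image resets the
// inherited CMD.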
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["test"]
ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["cat"]`, name)))
res := inspectField(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
res = inspectField(c, name, "Config.Entrypoint")
if expected := "[cat]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
name := "testbuildclearcmd"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/bash"]
CMD []`))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected %s", res, "[]")
}
}
func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
// Skip on Windows. Base image on Windows has a CMD set in the image.
testRequires(c, DaemonIsLinux)
name := "testbuildemptycmd"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "null" {
c.Fatalf("Cmd %s, expected %s", res, "null")
}
}
func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
name := "testbuildonbuildparent"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n"))
buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
Out: "# Executing 1 build trigger",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
ExitCode: 125,
Err: "invalid reference format",
})
}
func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
name := "testbuildcmdshc"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["/bin/sh","-c","echo cmd"]`
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo cmd"]`
}
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
// Test to make sure that when we concatenate arrays we take into account
// the arg separator, so that ["echo","hi"] and ["echo hi"] don't
// look the same
name := "testbuildcmdspaces"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n"))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n"))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same CMD")
}
// Now do the same with ENTRYPOINT
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n"))
id1 = getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n"))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same ENTRYPOINT")
}
}
func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
name := "testbuildcmdjson"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["echo","cmd"]`
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChild(c *check.C) {
buildImageSuccessfully(c, "parent", build.WithDockerfile(`
FROM busybox
ENTRYPOINT exit 130
`))
icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{
ExitCode: 130,
})
buildImageSuccessfully(c, "child", build.WithDockerfile(`
FROM parent
ENTRYPOINT exit 5
`))
icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{
ExitCode: 5,
})
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChildInspect(c *check.C) {
var (
name = "testbuildepinherit"
name2 = "testbuildepinherit2"
expected = `["/bin/sh","-c","echo quux"]`
)
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo quux"]`
}
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar"))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name)))
res := inspectFieldJSON(c, name2, "Config.Entrypoint")
if res != expected {
c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
}
icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{
Out: "quux",
})
}
func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
name := "testbuildentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT echo`))
dockerCmd(c, "run", "--rm", name)
}
func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildexoticshellinterpolation"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV SOME_VAR a.b.c
RUN [ "$SOME_VAR" = 'a.b.c' ]
RUN [ "${SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR%.*}" = 'a.b' ]
RUN [ "${SOME_VAR%%.*}" = 'a' ]
RUN [ "${SOME_VAR#*.}" = 'b.c' ]
RUN [ "${SOME_VAR##*.}" = 'c' ]
RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
RUN [ "${#SOME_VAR}" = '5' ]
RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
`))
}
func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"
expectedExitCode := 2
if testEnv.DaemonPlatform() == "windows" {
expectedExitCode = 127
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
CMD [ '/bin/sh', '-c', 'echo hi' ]`))
icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{
ExitCode: expectedExitCode,
})
}
func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
name := "testbuildverboseout"
expected := "\n123\n"
if testEnv.DaemonPlatform() == "windows" {
expected = "\n123\r\n"
}
buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo 123`)).Assert(c, icmd.Expected{
Out: expected,
})
}
func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
name := "testbuildwithtabs"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo"))
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
if testEnv.DaemonPlatform() == "windows" {
expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]`
expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
}
if res != expected1 && res != expected2 {
c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
}
}
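// Test to make sure both LABEL forms (key=value and key value) end up in
// Config.Labels.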
func (s *DockerSuite) TestBuildLabels(c *check.C) {
name := "testbuildlabel"
expected := `{"License":"GPL","Vendor":"Acme"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme
LABEL License GPL`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
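// Test to make sure LABEL values take part in the build cache: identical
// labels reuse the cache, changed values do not, and the key=value and
// key value forms are treated as equivalent.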
func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
name := "testbuildlabelcache"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme1`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor Acme`))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2)
}
// Now make sure the cache isn't used by mistake
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2)
}
}
func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) {
// This test makes sure that -q works correctly when build is successful:
// stdout has only the image ID (long image ID) and stderr is empty.
outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")
buildFlags := cli.WithFlags("-q")
tt := []struct {
Name string
BuildFunc func(string) *icmd.Result
}{
{
Name: "quiet_build_stdin_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox"))
},
},
{
Name: "quiet_build_ctx_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, withBuildContext(c,
withFile("Dockerfile", "FROM busybox"),
withFile("quiet_build_success_fctx", "test"),
))
},
},
{
Name: "quiet_build_git_success",
BuildFunc: func(name string) *icmd.Result {
git := newFakeGit(c, "repo", map[string]string{
"Dockerfile": "FROM busybox",
}, true)
return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL))
},
},
}
for _, te := range tt {
result := te.BuildFunc(te.Name)
result.Assert(c, icmd.Success)
if outRegexp.Find([]byte(result.Stdout())) == nil {
c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout())
}
if result.Stderr() != "" {
c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr())
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
testRequires(c, Network)
testName := "quiet_build_not_exists_image"
dockerfile := "FROM busybox11"
quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(testName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
// This test makes sure that -q works correctly when build fails by
// comparing between the stderr output in quiet mode and in stdout
// and stderr output in verbose mode
testCases := []struct {
testName string
dockerfile string
}{
{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
{"quiet_build_unknown_instr", "FROMD busybox"},
}
for _, tc := range testCases {
quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined()))
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
// This test ensures that when given a wrong URL, stderr in quiet mode and
// stderr in verbose mode are identical.
// TODO(vdemeester) with cobra, stdout has one carriage return too many, so this test should not check stdout
URL := "http://something.invalid"
name := "quiet_build_wrong_remote"
quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(name, build.WithContextPath(URL))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if strings.TrimSpace(quietResult.Stderr()) != strings.TrimSpace(result.Combined()) {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildStderr(c *check.C) {
// This test just makes sure that no non-error output goes
// to stderr
name := "testbuildstderr"
result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one"))
result.Assert(c, icmd.Success)
// Windows to non-Windows should have a security warning
if runtime.GOOS == "windows" && testEnv.DaemonPlatform() != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") {
c.Fatalf("Stdout contains unexpected output: %q", result.Stdout())
}
// Stderr should always be empty
if result.Stderr() != "" {
c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr())
}
}
func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // test uses chown: not available on windows
name := "testbuildchownsinglefile"
ctx := fakeContext(c, `
FROM busybox
COPY test /
RUN ls -l /test
RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
`, map[string]string{
"test": "test",
})
defer ctx.Close()
if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
}
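// Test to make sure a symlink contained in a tar added to the build context
// cannot be used to write files outside the build root (symlink breakout).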
func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
c.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
c.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
c.Fatal(err)
}
w := tar.NewWriter(f)
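// "symlink2" points far above the build root and "symlink" chains through it
// to the host tmpdir; if ADD followed the links, the later "add inject
// /symlink/" would write "inject" into tmpdir on the host (checked below).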
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
buildImageSuccessfully(c, name, build.WithoutCache, withExternalBuildContext(fakeContextFromDir(ctx)))
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
c.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
c.Fatalf("unexpected error: %v", err)
}
}
func (s *DockerSuite) TestBuildXZHost(c *check.C) {
// /usr/local/sbin/xz gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildxzhost"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`),
withFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"),
withFile("xz", "#!/bin/sh\ntouch /injected"),
))
}
func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
// /foo/file gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127
var (
name = "testbuildvolumescontent"
expected = "some text"
volName = "/foo"
)
if testEnv.DaemonPlatform() == "windows" {
volName = "C:/foo"
}
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
COPY content /foo/file
VOLUME `+volName+`
CMD cat /foo/file`),
withFile("content", expected),
))
out, _ := dockerCmd(c, "run", "--rm", name)
if out != expected {
c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
}
}
// FIXME(vdemeester) part of this should be a unit test, the other part should be clearer
func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
ctx := fakeContext(c, `FROM busybox
RUN echo from Dockerfile`,
map[string]string{
"Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
"files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
"files/dFile": "FROM busybox\nRUN echo from files/dFile",
"dFile": "FROM busybox\nRUN echo from dFile",
"files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
})
defer ctx.Close()
out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
if err != nil {
c.Fatalf("Failed to build: %s\n%s", out, err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test1 should have used Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from files/dFile") {
c.Fatalf("test3 should have used files/dFile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
if err != nil {
c.Fatal(err)
}
if !strings.Contains(out, "from dFile") {
c.Fatalf("test4 should have used dFile, output:%s", out)
}
dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5")
c.Assert(err, check.IsNil)
nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
if _, err = os.Create(nonDockerfileFile); err != nil {
c.Fatal(err)
}
out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
if err == nil {
c.Fatalf("test5 was supposed to fail because the Dockerfile is outside the build context")
}
if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
if err != nil {
c.Fatalf("test6 failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test6 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..")
if err != nil {
c.Fatalf("test7 failed: %s", err)
}
if !strings.Contains(out, "from files/Dockerfile") {
c.Fatalf("test7 should have used files Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".")
if err == nil || !strings.Contains(out, "must be within the build context") {
c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err)
}
tmpDir := os.TempDir()
out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir)
if err != nil {
c.Fatalf("test9 - failed: %s", err)
}
if !strings.Contains(out, "from Dockerfile") {
c.Fatalf("test9 should have used root Dockerfile, output:%s", out)
}
out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".")
if err != nil {
c.Fatalf("test10 should have worked: %s", err)
}
if !strings.Contains(out, "from files/dFile2") {
c.Fatalf("test10 should have used files/dFile2, output:%s", out)
}
}
func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
// If Dockerfile is not present, use dockerfile
buildImage("test1", withBuildContext(c,
withFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from dockerfile",
})
// Prefer Dockerfile in place of dockerfile
buildImage("test1", withBuildContext(c,
withFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
withFile("Dockerfile", `FROM busybox
RUN echo from Dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
server := fakeStorage(c, map[string]string{"baz": `FROM busybox
RUN echo from baz
COPY * /tmp/
RUN find /tmp/`})
defer server.Close()
ctx := fakeContext(c, `FROM busybox
RUN echo from Dockerfile`,
map[string]string{})
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := buildImage("test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
return nil
})
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why
ctx := fakeContext(c, `FROM busybox
RUN echo "from Dockerfile"`,
map[string]string{})
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := buildImage("test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
cmd.Stdin = strings.NewReader(`FROM busybox
RUN echo "from baz"
COPY * /tmp/
RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`)
return nil
})
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
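// Test to make sure all equivalent name forms for an official image (with and
// without docker.io/index.docker.io and the library/ prefix) can be used in FROM.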
func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
name := "testbuildfromofficial"
fromNames := []string{
"busybox",
"docker.io/busybox",
"index.docker.io/busybox",
"library/busybox",
"docker.io/library/busybox",
"index.docker.io/library/busybox",
}
for idx, fromName := range fromNames {
imgName := fmt.Sprintf("%s%d", name, idx)
buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName))
dockerCmd(c, "rmi", imgName)
}
}
func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
name := "testbuilddockerfileoutsidecontext"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
c.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
defer os.Chdir(wd)
if err := os.Chdir(ctx); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
c.Fatal(err)
}
for _, dockerfilePath := range []string{
filepath.Join("..", "outsideDockerfile"),
filepath.Join(ctx, "dockerfile1"),
filepath.Join(ctx, "dockerfile2"),
} {
result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")
c.Assert(result, icmd.Matches, icmd.Expected{
Err: "must be within the build context",
ExitCode: 1,
})
deleteImages(name)
}
os.Chdir(tmpdir)
// Path to Dockerfile should be resolved relative to working directory, not relative to context.
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)
if err == nil {
c.Fatalf("Expected error. Out: %s", out)
}
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildSpaces(c *check.C) {
// Test to make sure that leading/trailing spaces on a command
// don't change the error msg we get
name := "testspaces"
ctx := fakeContext(c, "FROM busybox\nCOPY\n",
map[string]string{
"Dockerfile": "FROM busybox\nCOPY\n",
})
defer ctx.Close()
result1 := buildImage(name, withExternalBuildContext(ctx))
result1.Assert(c, icmd.Expected{
ExitCode: 1,
})
ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
result2 := buildImage(name, withExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := removeLogTimestamps(result1.Error.Error())
e2 := removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY")
result2 = buildImage(name, build.WithoutCache, withExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY ")
result2 = buildImage(name, build.WithoutCache, withExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error)
}
}
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
// Test to make sure that spaces in quotes aren't lost
name := "testspacesquotes"
dockerfile := `FROM busybox
RUN echo " \
foo "`
expected := "\n foo \n"
// Windows uses the builtin echo, which preserves quotes
if testEnv.DaemonPlatform() == "windows" {
expected = "\" foo \""
}
buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
Out: expected,
})
}
// #4393
func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(`
FROM busybox
RUN touch /foo
VOLUME /foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "file exists",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
// Test to make sure that all Dockerfile commands (except the ones listed
// in skipCmds) will generate an error if no args are provided.
// Note: INSERT is deprecated so we exclude it because of that.
skipCmds := map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
}
if testEnv.DaemonPlatform() == "windows" {
skipCmds = map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
"STOPSIGNAL": {},
"ARG": {},
"USER": {},
"EXPOSE": {},
}
}
for cmd := range command.Commands {
cmd = strings.ToUpper(cmd)
if _, ok := skipCmds[cmd]; ok {
continue
}
var dockerfile string
if cmd == "FROM" {
dockerfile = cmd
} else {
// Add FROM to make sure we don't complain about it missing
dockerfile = "FROM busybox\n" + cmd
}
buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: cmd + " requires",
})
}
}
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "No image was generated",
})
}
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
buildImageSuccessfully(c, "sc", withBuildContext(c,
withFile("Dockerfile", "FROM busybox\n"),
withFile("..gitme", ""),
))
}
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
testRequires(c, DaemonIsLinux) // No hello-world Windows image
name := "testbuildrunonejson"
buildImage(name, build.WithDockerfile(`FROM hello-world:frozen
RUN [ "/hello" ]`)).Assert(c, icmd.Expected{
Out: "Hello from Docker",
})
}
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
name := "testbuildemptystringvolume"
buildImage(name, build.WithDockerfile(`
FROM busybox
ENV foo=""
VOLUME $foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
testRequires(c, SameHostDaemon, DaemonIsLinux)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup': %v", err)
}
selfCgroupPaths := testutil.ParseCgroupPaths(string(data))
_, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
result := buildImage("buildcgroupparent",
cli.WithFlags("--cgroup-parent", cgroupParent),
build.WithDockerfile(`
FROM busybox
RUN cat /proc/self/cgroup
`))
result.Assert(c, icmd.Success)
m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined())
c.Assert(err, check.IsNil)
if !m {
c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined())
}
}
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
// Check to make sure our build output prints the Dockerfile cmd
// properly - there was a bug that caused it to be duplicated on the
// Step X line
name := "testbuildnodupoutput"
result := buildImage(name, build.WithDockerfile(`
FROM busybox
RUN env`))
result.Assert(c, icmd.Success)
exp := "\nStep 2/2 : RUN env\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
// GH15826
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
// Explicit check to ensure that build starts from step 1 rather than 0
name := "testbuildstartsfromone"
result := buildImage(name, build.WithDockerfile(`FROM busybox`))
result.Assert(c, icmd.Success)
exp := "\nStep 1/1 : FROM busybox\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
// Test to make sure the bad command is quoted with just "s and
// not as a Go []string
name := "testbuildbadrunerrmsg"
shell := "/bin/sh -c"
exitCode := 127
if testEnv.DaemonPlatform() == "windows" {
shell = "cmd /S /C"
// architectural - Windows has to start the container to determine the exe is bad, Linux does not
exitCode = 1
}
exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode)
buildImage(name, build.WithDockerfile(`
FROM busybox
RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{
ExitCode: exitCode,
Err: exp,
})
}
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
repoName := s.setupTrustedImage(c, "trusted-build")
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuild"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]),
})
// We should also have a tag reference for the image.
dockerCmd(c, "inspect", repoName)
// We should now be able to remove the tag reference.
dockerCmd(c, "rmi", repoName)
}
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuilduntrustedtag"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "does not have trust data for",
})
}
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempDir)
// Make a real context directory in this temp directory with a simple
// Dockerfile.
realContextDirname := filepath.Join(tempDir, "context")
if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
c.Fatal(err)
}
if err = ioutil.WriteFile(
filepath.Join(realContextDirname, "Dockerfile"),
[]byte(`
FROM busybox
RUN echo hello world
`),
os.FileMode(0644),
); err != nil {
c.Fatal(err)
}
// Make a symlink to the real context directory.
contextSymlinkName := filepath.Join(tempDir, "context_link")
if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
c.Fatal(err)
}
// Executing the build with the symlink as the specified context should
// *not* fail.
dockerCmd(c, "build", contextSymlinkName)
}
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create the releases role
s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the releases role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
icmd.RunCmd(icmd.Command(dockerBinary, "push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/releases")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildreleasesrole"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName),
})
}
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create a non-releases delegation role
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the other role
otherTag := fmt.Sprintf("%s:other", repoName)
dockerCmd(c, "tag", "busybox", otherTag)
icmd.RunCmd(icmd.Command(dockerBinary, "push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/other")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
out, status := dockerCmd(c, "rmi", otherTag)
c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out))
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildotherrole"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
// Issue #15634: COPY fails when path starts with "null"
func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) {
name := "testbuildnullstringinaddcopyvolume"
volName := "nullvolume"
if testEnv.DaemonPlatform() == "windows" {
volName = `C:\\nullvolume`
}
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `
FROM busybox
ADD null /
COPY nullfile /
VOLUME `+volName+`
`),
withFile("null", "test1"),
withFile("nullfile", "test2"),
))
}
func (s *DockerSuite) TestBuildStopSignal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet
imgName := "test_build_stop_signal"
buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox
STOPSIGNAL SIGKILL`))
res := inspectFieldJSON(c, imgName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
containerName := "test-container-stop-signal"
dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top")
res = inspectFieldJSON(c, containerName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
}
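// Test to make sure a value passed with --build-arg is visible to RUN during
// the build but is not persisted into the resulting image's runtime environment.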
func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
var dockerfile string
if testEnv.DaemonPlatform() == "windows" {
// Bugs in Windows busybox port - use the default base image and native cmd stuff
dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+`
ARG %s
RUN echo %%%s%%
CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey)
} else {
dockerfile = fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s
CMD echo $%s`, envKey, envKey, envKey)
}
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
containerName := "bldargCont"
out, _ := dockerCmd(c, "run", "--name", containerName, imgName)
out = strings.Trim(out, " \r\n'")
if out != "" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
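// Test to make sure an ARG default value declared in the Dockerfile shows up
// in `docker history` output.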
func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envDef := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s`, envKey, envDef)
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
outputTabs := strings.Split(out, "\n")[1]
if !strings.Contains(outputTabs, envDef) {
c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef)
}
}
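// Test to make sure proxy credentials passed only via --build-arg are excluded
// from `docker history`, while explicitly declared ARGs remain visible.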
func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
proxy := "HTTP_PROXY=http://user:[email protected]"
explicitProxyKey := "http_proxy"
explicitProxyVal := "http://user:[email protected]"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo "Testing Build Args!"`, envKey, explicitProxyKey)
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
"--build-arg", proxy),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Success)
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
if strings.Contains(out, proxy) {
c.Fatalf("failed to exclude proxy settings from history!")
}
if !strings.Contains(out, fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)) {
c.Fatalf("explicitly defined ARG %s is not in output", explicitProxyKey)
}
if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) {
c.Fatalf("missing build arguments from output")
}
}
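// Test to make sure two builds with the same Dockerfile and identical
// --build-arg values share the build cache.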
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachehit"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID != origImgID {
c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
extraEnvKey := "foo1"
extraEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo $%s`, envKey, extraEnvKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal),
),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
newEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
RUN echo $%s
CMD echo $%s
`, envKey, envKey, envValOveride, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
// FIXME(vdemeester) might be useful to merge with the one above ?
func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ENV %s %s
ARG %s
RUN echo $%s
CMD echo $%s
`, envKey, envValOveride, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
wdVar := "WDIR"
wdVal := "/tmp/"
addVar := "AFILE"
addVal := "addFile"
copyVar := "CFILE"
copyVal := "copyFile"
envVar := "foo"
envVal := "bar"
exposeVar := "EPORT"
exposeVal := "9999"
userVar := "USER"
userVal := "testUser"
volVar := "VOL"
volVal := "/testVol/"
buildImageSuccessfully(c, imgName,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal),
"--build-arg", fmt.Sprintf("%s=%s", addVar, addVal),
"--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal),
"--build-arg", fmt.Sprintf("%s=%s", envVar, envVal),
"--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal),
"--build-arg", fmt.Sprintf("%s=%s", userVar, userVal),
"--build-arg", fmt.Sprintf("%s=%s", volVar, volVal),
),
withBuildContext(c,
withFile("Dockerfile", fmt.Sprintf(`FROM busybox
ARG %s
WORKDIR ${%s}
ARG %s
ADD ${%s} testDir/
ARG %s
COPY $%s testDir/
ARG %s
ENV %s=${%s}
ARG %s
EXPOSE $%s
ARG %s
USER $%s
ARG %s
VOLUME ${%s}`,
wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar,
envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)),
withFile(addVal, "some stuff"),
withFile(copyVal, "some stuff"),
),
)
res := inspectField(c, imgName, "Config.WorkingDir")
if res != filepath.ToSlash(filepath.Clean(wdVal)) {
c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res)
}
var resArr []string
inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr)
found := false
for _, v := range resArr {
if fmt.Sprintf("%s=%s", envVar, envVal) == v {
found = true
break
}
}
if !found {
c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v",
envVar, envVal, resArr)
}
var resMap map[string]interface{}
inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap)
if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok {
c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap)
}
res = inspectField(c, imgName, "Config.User")
if res != userVal {
c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res)
}
inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap)
if _, ok := resMap[volVal]; !ok {
c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
envKey := "foo"
envVal := "bar"
envKey1 := "foo1"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
ENV %s ${%s}
RUN echo $%s
CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
ARG %s
CMD echo $%s`, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), envVal) {
c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined())
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "HTTP_PROXY"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), envVal) {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s
ENV %s $%s
RUN echo $%s
CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 1 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
warnStr := "[Warning] One or more build-args"
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: warnStr,
})
}
func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
dockerfile := `FROM busybox
ARG FOO1=fromfile
ARG FOO2=fromfile
ARG FOO3=fromfile
ARG FOO4=fromfile
ARG FOO5
ARG FOO6
ARG FO10
RUN env
RUN [ "$FOO1" == "fromcmd" ]
RUN [ "$FOO2" == "" ]
RUN [ "$FOO3" == "fromenv" ]
RUN [ "$FOO4" == "fromfile" ]
RUN [ "$FOO5" == "fromcmd" ]
# The following should not exist at all in the env
RUN [ "$(env | grep FOO6)" == "" ]
RUN [ "$(env | grep FOO7)" == "" ]
RUN [ "$(env | grep FOO8)" == "" ]
RUN [ "$(env | grep FOO9)" == "" ]
RUN [ "$FO10" == "" ]
`
result := buildImage("testbuildtimeargenv",
cli.WithFlags(
"--build-arg", fmt.Sprintf("FOO1=fromcmd"),
"--build-arg", fmt.Sprintf("FOO2="),
"--build-arg", fmt.Sprintf("FOO3"), // set in env
"--build-arg", fmt.Sprintf("FOO4"), // not set in env
"--build-arg", fmt.Sprintf("FOO5=fromcmd"),
// FOO6 is not set at all
"--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning
"--build-arg", fmt.Sprintf("FOO8="), // should produce a warning
"--build-arg", fmt.Sprintf("FOO9"), // should produce a warning
"--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value
),
cli.WithEnvironmentVariables(append(os.Environ(),
"FOO1=fromenv",
"FOO2=fromenv",
"FOO3=fromenv")...),
withBuildContext(c,
withFile("Dockerfile", dockerfile),
),
)
result.Assert(c, icmd.Success)
// Now check to make sure we got a warning msg about unused build-args
i := strings.Index(result.Combined(), "[Warning]")
if i < 0 {
c.Fatalf("Missing the build-arg warning in %q", result.Combined())
}
out := result.Combined()[i:] // "out" should contain just the warning message now
// These were specified on a --build-arg but no ARG was in the Dockerfile
c.Assert(out, checker.Contains, "FOO7")
c.Assert(out, checker.Contains, "FOO8")
c.Assert(out, checker.Contains, "FOO9")
}
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
envKey3 := "foo3"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=""
ARG %s=''
ARG %s="''"
ARG %s='""'
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
envKey2, envKey3)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=
ARG %s=""
ARG %s=''
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN env`, envKey)
result := buildImage(imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envKey) != 1 {
c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined())
}
}
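// Test to make sure ARG is scoped to the build stage that declares it: the
// first stage sees foo, the final stage only sees bar.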
func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
LABEL multifromtest=1
RUN env > /out
FROM busybox
ARG bar=def
RUN env > /out`
result := buildImage(imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
result = icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "images", "-q", "-f", "label=multifromtest=1"},
})
result.Assert(c, icmd.Success)
parentID := strings.TrimSpace(result.Stdout())
result = icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "run", "--rm", parentID, "cat", "/out"},
})
result.Assert(c, icmd.Success)
c.Assert(result.Stdout(), checker.Contains, "foo=abc")
result = icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "run", "--rm", imgName, "cat", "/out"},
})
result.Assert(c, icmd.Success)
c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo")
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
FROM busybox
ARG bar
RUN env > /out`
result := buildImage(imgName, build.WithDockerfile(dockerfile), cli.WithFlags(
"--build-arg", fmt.Sprintf("baz=abc")))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "[Warning]")
c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed")
result = icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "run", "--rm", imgName, "cat", "/out"},
})
result.Assert(c, icmd.Success)
c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz")
}
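// Test to make sure files written into a named volume by `docker run` are not
// visible during build, even when the Dockerfile declares the same VOLUME path.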
func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) {
volName := "testname:/foo"
if testEnv.DaemonPlatform() == "windows" {
volName = "testname:C:\\foo"
}
dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops")
dockerFile := `FROM busybox
VOLUME ` + volName + `
RUN ls /foo/oops
`
buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
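// Test to make sure a successful build emits a "tag" event for the built image.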
func (s *DockerSuite) TestBuildTagEvent(c *check.C) {
since := daemonUnixTime(c)
dockerFile := `FROM busybox
RUN echo events
`
buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile))
until := daemonUnixTime(c)
out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image")
events := strings.Split(strings.TrimSpace(out), "\n")
actions := eventActionsByIDAndType(c, events, "test:latest", "image")
var foundTag bool
for _, a := range actions {
if a == "tag" {
foundTag = true
break
}
}
c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out))
}
// #15780
func (s *DockerSuite) TestBuildMultipleTags(c *check.C) {
dockerfile := `
FROM busybox
MAINTAINER test-15780
`
buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile))
id1 := getIDByName(c, "tag1")
id2 := getIDByName(c, "tag2:v2")
c.Assert(id1, check.Equals, id2)
}
// #17290
func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakeContext(c, `
FROM busybox
COPY . ./`,
map[string]string{
"foo": "bar",
})
defer ctx.Close()
err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
// warm up cache
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
// add new file to context, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644)
c.Assert(err, checker.IsNil)
result := buildImage(name, withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakeContext(c, `
FROM busybox
COPY asymlink target`,
map[string]string{
"foo": "bar",
})
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
out, _ := dockerCmd(c, "run", "--rm", name, "cat", "target")
c.Assert(out, checker.Matches, "bar")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := buildImage(name, withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", name, "cat", "target")
c.Assert(out, checker.Matches, "baz")
}
func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakeContext(c, `
FROM busybox
COPY asymlink /`,
map[string]string{
"foo/abc": "bar",
"foo/def": "baz",
})
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
out, _ := dockerCmd(c, "run", "--rm", name, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbaz")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644)
c.Assert(err, checker.IsNil)
result := buildImage(name, withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out, _ = dockerCmd(c, "run", "--rm", name, "cat", "abc", "def")
c.Assert(out, checker.Matches, "barbax")
}
// TestBuildSymlinkBasename tests that target file gets basename from symlink,
// not from the target file.
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakeContext(c, `
FROM busybox
COPY asymlink /`,
map[string]string{
"foo": "bar",
})
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
out, _ := dockerCmd(c, "run", "--rm", name, "cat", "asymlink")
c.Assert(out, checker.Matches, "bar")
}
// #17827
func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) {
name := "testbuildrootsource"
ctx := fakeContext(c, `
FROM busybox
COPY / /data`,
map[string]string{
"foo": "bar",
})
defer ctx.Close()
// warm up cache
buildImageSuccessfully(c, name, withExternalBuildContext(ctx))
// change file, should invalidate cache
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := buildImage(name, withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
}
// #19375
func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) {
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
}
// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir
func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildworkdirwindowspath"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
RUN mkdir C:\\work
WORKDIR C:\\work
RUN if "%CD%" NEQ "C:\work" exit -1
`))
}
func (s *DockerSuite) TestBuildLabel(c *check.C) {
name := "testbuildlabel"
testLabel := "foo"
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
name := "testbuildlabel"
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"),
build.WithDockerfile("FROM busybox"))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
v, ok := labels["foo"]
if !ok {
c.Fatal("label `foo` not found in image")
}
c.Assert(v, checker.Equals, "bar")
}
func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
name := "testbuildlabelcachecommit"
testLabel := "foo"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) {
name := "testbuildlabelmultiple"
testLabels := map[string]string{
"foo": "bar",
"123": "456",
}
labelArgs := []string{}
for k, v := range testLabels {
labelArgs = append(labelArgs, "--label", k+"="+v)
}
buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
for k, v := range testLabels {
if x, ok := labels[k]; !ok || x != v {
c.Fatalf("label %s=%s not found in image", k, v)
}
}
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) {
dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
baseImage := privateRegistryURL + "/baseimage"
buildImageSuccessfully(c, baseImage, build.WithDockerfile(`
FROM busybox
ENV env1 val1
`))
dockerCmd(c, "push", baseImage)
dockerCmd(c, "rmi", baseImage)
buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(`
FROM %s
ENV env2 val2
`, baseImage)))
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
// make sure the image is pulled when building
dockerCmd(c, "rmi", repoName)
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "--config", tmp, "build", "-"},
Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)),
}).Assert(c, icmd.Success)
}
// Test cases in #22036
func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) {
// Command line option labels will always override
name := "scratchy"
expected := `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
name = "from"
expected = `{"foo":"from-dockerfile"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option label will override even via `FROM`
name = "new"
expected = `{"bar":"from-dockerfile2","foo":"new"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
name = "scratchy2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
// This time is for inherited images
name = "new2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with only `FROM`
name = "scratchy"
expected = `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with env var
name = "scratchz"
expected = `{"bar":"$PATH"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
// Test case for #22855
func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) {
name := "test-delete-committed-file"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo test > file
RUN test -e file
RUN rm file
RUN sh -c "! test -e file"`))
}
// #20083
func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) {
// TODO Windows: Figure out why this test is flakey on TP5. If you add
// something like RUN sleep 5, or even RUN ls /tmp after the ADD line,
// it is more reliable, but that's not a good fix.
testRequires(c, DaemonIsLinux)
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(ls -la /tmp/#1)"
RUN sh -c "(! ls -la /tmp/#2)"
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"`
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile("foo", "foo"),
withFile("foo2", "foo2"),
withFile("dir1/foo", "foo in dir1"),
withFile("#1", "# file 1"),
withFile("#2", "# file 2"),
withFile(".dockerignore", `# Visual C++ cache files
# because we have git ;-)
# The above comment is from #20083
foo
#dir1/foo
foo2
# The following is considered as comment as # is at the beginning
#1
# The following is not considered as comment as # is not at the beginning
#2
`)))
}
// Test case for #23221
func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) {
name := "test-with-utf8-bom"
dockerfile := []byte(`FROM busybox`)
bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...)
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", string(bomDockerfile)),
))
}
// Test case for UTF-8 BOM in .dockerignore, related to #23221
func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) {
name := "test-with-utf8-bom-dockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls -la /tmp
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
dockerignore := []byte("./Dockerfile\n")
bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...)
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", dockerfile),
withFile(".dockerignore", string(bomDockerignore)),
))
}
// #22489 Shell test to confirm config gets updated correctly
func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) {
name := "testbuildshellupdatesconfig"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["foo", "-bar"]`))
expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]`
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
if res != expected {
c.Fatalf("%s, expected %s", res, expected)
}
res = inspectFieldJSON(c, name, "ContainerConfig.Shell")
if res != `["foo","-bar"]` {
c.Fatalf(`%s, expected ["foo","-bar"]`, res)
}
}
// #22489 Changing the shell multiple times and CMD after.
func (s *DockerSuite) TestBuildShellMultiple(c *check.C) {
name := "testbuildshellmultiple"
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo defaultshell
SHELL ["echo"]
RUN echoshell
SHELL ["ls"]
RUN -l
CMD -l`))
result.Assert(c, icmd.Success)
// Must contain 'defaultshell' twice
if len(strings.Split(result.Combined(), "defaultshell")) != 3 {
c.Fatalf("defaultshell should have appeared twice in %s", result.Combined())
}
// Must contain 'echoshell' twice
if len(strings.Split(result.Combined(), "echoshell")) != 3 {
c.Fatalf("echoshell should have appeared twice in %s", result.Combined())
}
// Must contain "total " (part of ls -l)
if !strings.Contains(result.Combined(), "total ") {
c.Fatalf("%s should have contained 'total '", result.Combined())
}
// A container started from the image uses the shell-form CMD.
// Last shell is ls. CMD is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489. Changed SHELL with ENTRYPOINT
func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) {
name := "testbuildshellentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
SHELL ["ls"]
ENTRYPOINT -l`))
// A container started from the image uses the shell-form ENTRYPOINT.
// Shell is ls. ENTRYPOINT is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489 Shell test to confirm shell is inherited in a subsequent build
func (s *DockerSuite) TestBuildShellInherited(c *check.C) {
name1 := "testbuildshellinherited1"
buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox
SHELL ["ls"]`))
name2 := "testbuildshellinherited2"
buildImage(name2, build.WithDockerfile(`FROM `+name1+`
RUN -l`)).Assert(c, icmd.Expected{
// ls -l has "total " followed by some number in it, ls without -l does not.
Out: "total ",
})
}
// #22489 Shell test to confirm non-JSON doesn't work
func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) {
name := "testbuildshellnotjson"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
sHeLl exec -form`, // Casing explicit to ensure error is upper-cased.
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "SHELL requires the arguments to be in JSON form",
})
}
// #22489 Windows shell test to confirm native is powershell if executing a PS command
// This would error if the default shell were still cmd.
func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildshellpowershell"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["powershell", "-command"]
RUN Write-Host John`)).Assert(c, icmd.Expected{
Out: "\nJohn\n",
})
}
// Verify that escape is being correctly applied to words when escape directive is not \.
// Tests WORKDIR, ADD
func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildescapenotbackslashwordtesta"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
WORKDIR c:\windows
RUN dir /w`)).Assert(c, icmd.Expected{
Out: "[System32]",
})
name = "testbuildescapenotbackslashwordtestb"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
SHELL ["powershell.exe"]
WORKDIR c:\foo
ADD Dockerfile c:\foo\
RUN dir Dockerfile`)).Assert(c, icmd.Expected{
Out: "-a----",
})
}
// #22868. Make sure shell-form CMD is marked as escaped in the config of the image
func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildcmdshellescaped"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
CMD "ipconfig"
`))
res := inspectFieldJSON(c, name, "Config.ArgsEscaped")
if res != "true" {
c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res)
}
dockerCmd(c, "run", "--name", "inspectme", name)
dockerCmd(c, "wait", "inspectme")
res = inspectFieldJSON(c, name, "Config.Cmd")
if res != `["cmd","/S","/C","\"ipconfig\""]` {
c.Fatalf("CMD was not escaped Config.Cmd: got %v", res)
}
}
// Test case for #24912.
func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) {
name := "testbuildstepswithprogress"
totalRun := 5
result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun)))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun))
for i := 2; i <= 1+totalRun; i++ {
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun))
}
}
func (s *DockerSuite) TestBuildWithFailure(c *check.C) {
name := "testbuildwithfailure"
// The first test case can only detect `nobody` at run time, so all steps will show up
dockerfile := "FROM busybox\nRUN nobody"
result := buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody")
// In the second test case, `FFOM` should be detected before the build runs, so no steps show up
dockerfile = "FFOM nobody\nRUN nobody"
result = buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody")
}
func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) {
dockerfile := `
FROM busybox
RUN echo "test"
ENTRYPOINT ["sh"]`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
})
defer ctx.Close()
buildImageSuccessfully(c, "build1", withExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
func (s *DockerSuite) TestBuildCacheFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch bax`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
})
defer ctx.Close()
buildImageSuccessfully(c, "build1", withExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
dockerCmd(c, "rmi", "build2")
// no cache match with unknown source
result = buildImage("build2", cli.WithFlags("--cache-from=nosuchtag"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0)
dockerCmd(c, "rmi", "build2")
// clear parent images
tempDir, err := ioutil.TempDir("", "test-build-cache-from-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, "img.tar")
dockerCmd(c, "save", "-o", tempFile, "build1")
dockerCmd(c, "rmi", "build1")
dockerCmd(c, "load", "-i", tempFile)
parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1")
c.Assert(strings.TrimSpace(parentID), checker.Equals, "")
// cache still applies without parents
result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history1, _ := dockerCmd(c, "history", "-q", "build2")
// Retry, no new intermediate images
result = buildImage("build3", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id3 := getIDByName(c, "build3")
c.Assert(id1, checker.Equals, id3)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history2, _ := dockerCmd(c, "history", "-q", "build3")
c.Assert(history1, checker.Equals, history2)
dockerCmd(c, "rmi", "build2")
dockerCmd(c, "rmi", "build3")
dockerCmd(c, "rmi", "build1")
dockerCmd(c, "load", "-i", tempFile)
// Modify file, everything up to last command and layers are reused
dockerfile = `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch newfile`
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644)
c.Assert(err, checker.IsNil)
result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1")
layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2")
var layers1 []string
var layers2 []string
c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil)
c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil)
c.Assert(len(layers1), checker.Equals, len(layers2))
for i := 0; i < len(layers1)-1; i++ {
c.Assert(layers1[i], checker.Equals, layers2[i])
}
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ADD baz /
FROM busybox
ADD baz /`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
})
defer ctx.Close()
result := buildImage("build1", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
// second part of dockerfile was a repeat of first so should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
result = buildImage("build2", cli.WithFlags("--cache-from=build1"), withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
// now both parts of dockerfile should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
func (s *DockerSuite) TestBuildNetNone(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildnetnone"
buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(`
FROM busybox
RUN ping -c 1 8.8.8.8
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Out: "unreachable",
})
}
func (s *DockerSuite) TestBuildNetContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname")
name := "testbuildnetcontainer"
buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)),
build.WithDockerfile(`
FROM busybox
RUN nc localhost 1234 > /otherhost
`))
host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost")
c.Assert(strings.TrimSpace(host), check.Equals, "foobar")
}
func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildwithextrahost"
buildImageSuccessfully(c, name,
cli.WithFlags(
"--add-host", "foo:127.0.0.1",
"--add-host", "bar:127.0.0.1",
),
build.WithDockerfile(`
FROM busybox
RUN ping -c 1 foo
RUN ping -c 1 bar
`))
}
func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
FROM busybox
RUN ping -c 1 foo`
testCases := []struct {
testName string
dockerfile string
buildFlag string
}{
{"extra_host_missing_ip", dockerfile, "--add-host=foo"},
{"extra_host_missing_ip_with_delimeter", dockerfile, "--add-host=foo:"},
{"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"},
{"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"},
{"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"},
}
for _, tc := range testCases {
result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 125,
})
}
}
func (s *DockerSuite) TestBuildSquashParent(c *check.C) {
testRequires(c, ExperimentalDaemon)
dockerFile := `
FROM busybox
RUN echo hello > /hello
RUN echo world >> /hello
RUN echo hello > /remove_me
ENV HELLO world
RUN rm /remove_me
`
// build and get the ID that we can use later for history comparison
name := "test"
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
origID := getIDByName(c, name)
// build with squash
buildImageSuccessfully(c, name, cli.WithFlags("--squash"), build.WithDockerfile(dockerFile))
id := getIDByName(c, name)
out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello")
c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`)
// make sure the ID produced is the ID of the tag we specified
inspectID := inspectImage(c, "test", ".ID")
c.Assert(inspectID, checker.Equals, id)
origHistory, _ := dockerCmd(c, "history", origID)
testHistory, _ := dockerCmd(c, "history", "test")
splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n")
splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n")
c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1)
out = inspectImage(c, id, "len .RootFS.Layers")
c.Assert(strings.TrimSpace(out), checker.Equals, "3")
}
func (s *DockerSuite) TestBuildContChar(c *check.C) {
name := "testbuildcontchar"
buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{
Out: "Step 1/1 : FROM busybox",
})
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar
FROM busybox
%s
COPY baz baz
RUN echo mno > baz/cc
FROM busybox
COPY bar /
COPY --from=1 baz sub/
COPY --from=0 bar baz
COPY --from=0 bar bay`
ctx := fakeContext(c, fmt.Sprintf(dockerfile, ""), map[string]string{
"Dockerfile": dockerfile,
"foo": "abc",
"bar": "def",
"baz/aa": "ghi",
"baz/bb": "jkl",
})
defer ctx.Close()
result := buildImage("build1", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
out, _ := dockerCmd(c, "run", "build1", "cat", "bar")
c.Assert(strings.TrimSpace(out), check.Equals, "def")
out, _ = dockerCmd(c, "run", "build1", "cat", "sub/aa")
c.Assert(strings.TrimSpace(out), check.Equals, "ghi")
out, _ = dockerCmd(c, "run", "build1", "cat", "sub/cc")
c.Assert(strings.TrimSpace(out), check.Equals, "mno")
out, _ = dockerCmd(c, "run", "build1", "cat", "baz")
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out, _ = dockerCmd(c, "run", "build1", "cat", "bay")
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
result = buildImage("build2", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
// all commands should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7)
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should not affect last block
result = buildImage("build3", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2"))
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should affect both first and last block
result = buildImage("build4", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
out, _ = dockerCmd(c, "run", "build4", "cat", "bay")
c.Assert(strings.TrimSpace(out), check.Equals, "pqr")
out, _ = dockerCmd(c, "run", "build4", "cat", "baz")
c.Assert(strings.TrimSpace(out), check.Equals, "pqr")
}
func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=foo foo bar`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"foo": "abc",
})
defer ctx.Close()
buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "from expects an integer value corresponding to the context number",
})
dockerfile = `
FROM busybox
COPY --from=0 foo bar`
ctx = fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"foo": "abc",
})
defer ctx.Close()
buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "invalid from flag value 0 refers current build block",
})
}
func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
ctx := fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"foo": "abc",
})
defer ctx.Close()
result := buildImage("build1", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
dockerfile = `
FROM build1:latest
FROM busybox
COPY --from=0 bar /
COPY foo /`
ctx = fakeContext(c, dockerfile, map[string]string{
"Dockerfile": dockerfile,
"foo": "def",
})
defer ctx.Close()
result = buildImage("build2", withExternalBuildContext(ctx))
result.Assert(c, icmd.Success)
out, _ := dockerCmd(c, "run", "build2", "cat", "bar")
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out, _ = dockerCmd(c, "run", "build2", "cat", "foo")
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
// TestBuildOpaqueDirectory tests that a build succeeds which
// creates opaque directories.
// See https://github.com/docker/docker/issues/25244
func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerFile := `
FROM busybox
RUN mkdir /dir1 && touch /dir1/f1
RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2
RUN touch /dir1/f3
RUN [ -f /dir1/f2 ]
`
// Test that build succeeds, last command fails if opaque directory
// was not handled correctly
buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile))
}
// Windows test for USER in dockerfile
func (s *DockerSuite) TestBuildWindowsUser(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsuser"
buildImage(name, build.WithDockerfile(`FROM `+testEnv.MinimalBaseImage()+`
RUN net user user /add
USER user
RUN set username
`)).Assert(c, icmd.Expected{
Out: "USERNAME=user",
})
}
// Verifies that when WORKDIR is set to a non-existing directory, `COPY file .`
// creates the directory and copies the file into it, as opposed to copying the
// file as a file with the name of the directory.
// Fix for 27545 (found on Windows, but the regression test is useful for Linux too).
// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514.
func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) {
name := "testbuildcopyfiledotwithworkdir"
buildImageSuccessfully(c, name, withBuildContext(c,
withFile("Dockerfile", `FROM busybox
WORKDIR /foo
COPY file .
RUN ["cat", "/foo/file"]
`),
withFile("file", "content"),
))
}
// Case-insensitive environment variables on Windows
func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsenvcaseinsensitive"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
ENV FOO=bar foo=baz
`))
res := inspectFieldJSON(c, name, "Config.Env")
if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped.
c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res)
}
}
// Test case for 29667
func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) {
image := "testworkdirimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
`))
out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
// The Windows busybox image has a blank `cmd`
lookingFor := `["sh"]`
if testEnv.DaemonPlatform() == "windows" {
lookingFor = "null"
}
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
image = "testworkdirlabelimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
LABEL a=b
`))
out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
}
// Test case for 28902/28909
func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildworkdircmd"
dockerFile := `
FROM busybox
WORKDIR /
`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
result := buildImage(name, build.WithDockerfile(dockerFile))
result.Assert(c, icmd.Success)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) {
name := "test_build_line_error_onbuild"
buildImage(name, build.WithDockerfile(`FROM busybox
ONBUILD
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorUknownInstruction(c *check.C) {
name := "test_build_line_error_unknown_instruction"
buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
RUN echo hello
ERROR
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 3: Unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) {
name := "test_build_line_error_with_empty_lines"
buildImage(name, build.WithDockerfile(`
FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
CMD ["/bin/init"]
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 6: Unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) {
name := "test_build_line_error_with_comments"
buildImage(name, build.WithDockerfile(`FROM busybox
# This will print hello world
# and then ba
RUN echo hello world
NOINSTRUCTION echo ba
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 5: Unknown instruction: NOINSTRUCTION",
})
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
support/scripts/build_release_artifacts/main.go
|
package main
// This is a build script that Travis uses to build Paydex release packages.
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/paydex-core/paydex-go/support/errors"
"github.com/paydex-core/paydex-go/support/log"
)
var extractBinName = regexp.MustCompile(`^(?P<bin>[a-z-]+)-(?P<tag>.+)$`)
var builds = []buildConfig{
{"darwin", "amd64"},
{"linux", "amd64"},
{"linux", "arm"},
{"windows", "amd64"},
}
var binFilter = flag.String("bin", "", "restrict build to single binary")
var osFilter = flag.String("os", "", "restrict build to single os")
var archFilter = flag.String("arch", "", "restrict build to single arch")
var keepDir = flag.Bool("keep", false, "when true, artifact directories are not removed after packaging")
type buildConfig struct {
OS string
Arch string
}
func main() {
flag.Parse()
log.SetLevel(log.InfoLevel)
run("rm", "-rf", "dist/*")
if os.Getenv("TRAVIS_EVENT_TYPE") == "cron" {
buildNightlies()
os.Exit(0)
} else if os.Getenv("CIRCLE_TAG") != "" {
buildByTag()
os.Exit(0)
} else {
buildSnapshots()
os.Exit(0)
}
log.Info("nothing to do")
}
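// Illustrative local invocation (hypothetical tag and paths, not part of the original
// script): with the release tag exported the same way CircleCI would set it, e.g.
//
//	CIRCLE_TAG=horizon-v1.0.0 go run ./support/scripts/build_release_artifacts -os linux -arch amd64
//
// the script takes the buildByTag path and packages only the linux/amd64 artifact for
// the `horizon` binary at version v1.0.0, assuming such a package exists under
// services/ or tools/.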
// binPkgNames collects the package directories under `services` and `tools` of
// this repo; each directory corresponds to one binary that should be built and
// packaged by this script.
func binPkgNames() []string {
result := []string{}
result = append(result, binNamesForDir("services")...)
result = append(result, binNamesForDir("tools")...)
return result
}
func binNamesForDir(dir string) []string {
files, err := ioutil.ReadDir(dir)
if err != nil {
panic(errors.Wrap(err, "read-dir failed"))
}
result := []string{}
for _, file := range files {
if file.IsDir() && file.Name() != "internal" {
result = append(result, filepath.Join(dir, file.Name()))
}
}
return result
}
func build(pkg, dest, version, buildOS, buildArch string) {
buildTime := time.Now().Format(time.RFC3339)
timeFlag := fmt.Sprintf("-X github.com/paydex-core/paydex-go/support/app.buildTime=%s", buildTime)
versionFlag := fmt.Sprintf("-X github.com/paydex-core/paydex-go/support/app.version=%s", version)
if buildOS == "windows" {
dest = dest + ".exe"
}
cmd := exec.Command("go", "build",
"-o", dest,
"-ldflags", fmt.Sprintf("%s %s", timeFlag, versionFlag),
pkg,
)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Env = append(
os.Environ(),
fmt.Sprintf("GOOS=%s", buildOS),
fmt.Sprintf("GOARCH=%s", buildArch),
)
log.Infof("building %s", pkg)
log.Infof("running: %s", strings.Join(cmd.Args, " "))
err := cmd.Run()
if err != nil {
panic(err)
}
}
func buildNightlies() {
version := runOutput("git", "describe", "--always", "--dirty", "--tags")
repo := repoName()
for _, pkg := range binPkgNames() {
bin := filepath.Base(pkg)
if *binFilter != "" && *binFilter != bin {
continue
}
for _, cfg := range getBuildConfigs() {
dest := prepareDest(pkg, bin, "nightly", cfg.OS, cfg.Arch)
build(
fmt.Sprintf("%s/%s", repo, pkg),
filepath.Join(dest, bin),
version,
cfg.OS,
cfg.Arch,
)
packageArchive(dest, cfg.OS)
}
}
}
func buildByTag() {
bin, version := extractFromTag(os.Getenv("CIRCLE_TAG"))
pkg := packageName(bin)
repo := repoName()
if bin == "" {
log.Info("could not extract info from CIRCLE_TAG: skipping artifact packaging")
os.Exit(0)
}
// Don't build anything if no package can be found
if pkg == "" {
log.Infof("could not find `%s` in expected binary locations: skipping artifact packaging", bin)
os.Exit(0)
}
for _, cfg := range getBuildConfigs() {
dest := prepareDest(pkg, bin, version, cfg.OS, cfg.Arch)
// rebuild the binary with the version variable set
build(
fmt.Sprintf("%s/%s", repo, pkg),
filepath.Join(dest, bin),
version,
cfg.OS,
cfg.Arch,
)
packageArchive(dest, cfg.OS)
}
}
func buildSnapshots() {
rev := runOutput("git", "describe", "--always", "--dirty")
version := fmt.Sprintf("snapshot-%s", rev)
repo := repoName()
for _, pkg := range binPkgNames() {
bin := filepath.Base(pkg)
if *binFilter != "" && *binFilter != bin {
continue
}
for _, cfg := range getBuildConfigs() {
dest := prepareDest(pkg, bin, "snapshot", cfg.OS, cfg.Arch)
build(
fmt.Sprintf("%s/%s", repo, pkg),
filepath.Join(dest, bin),
version,
cfg.OS,
cfg.Arch,
)
packageArchive(dest, cfg.OS)
}
}
}
// extractFromTag extracts the name of the binary that should be packaged in the
// course of executing this script, as well as the version it should be packaged
// as, based on the name of the tag in the CIRCLE_TAG environment variable.
// Tags must be of the form `NAME-vSEMVER`, such as `horizon-v1.0.0`, to be
// matched by this function.
//
// In the event that the CIRCLE_TAG is missing or the match fails, an empty
// string will be returned.
func extractFromTag(tag string) (string, string) {
match := extractBinName.FindStringSubmatch(tag)
if match == nil {
return "", ""
}
return match[1], match[2]
}
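// Illustrative behaviour of the regexp above (hedged examples, not part of the
// original script):
//
//	extractFromTag("horizon-v1.0.0") // ("horizon", "v1.0.0")
//	extractFromTag("v1.0.0")         // ("", "") - no NAME- prefix, so the pattern does not match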
func getBuildConfigs() (result []buildConfig) {
for _, cfg := range builds {
if *osFilter != "" && *osFilter != cfg.OS {
continue
}
if *archFilter != "" && *archFilter != cfg.Arch {
continue
}
result = append(result, cfg)
}
return
}
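// For example (illustrative flags): running the script with -os linux -arch amd64
// narrows the build matrix defined in `builds` to the single {linux, amd64} configuration.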
// packageArchive tars or zips `dest`, depending upon the OS, then removes
// `dest`, in preparation of Circle uploading all artifacts to github releases.
func packageArchive(dest, buildOS string) {
release := filepath.Base(dest)
dir := filepath.Dir(dest)
if buildOS == "windows" {
pop := pushdir(dir)
// zip $RELEASE.zip $RELEASE/*
run("zip", "-r", release+".zip", release)
pop()
} else {
// tar -czf $dest.tar.gz -C $DIST $RELEASE
run("tar", "-czf", dest+".tar.gz", "-C", dir, release)
}
if !*keepDir {
run("rm", "-rf", dest)
}
}
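// For example (illustrative names): with dest "dist/horizon-v1.0.0-linux-amd64" this
// produces "dist/horizon-v1.0.0-linux-amd64.tar.gz", while a windows build of the same
// release would instead be zipped to "dist/horizon-v1.0.0-windows-amd64.zip". The source
// directory is then removed unless -keep was passed.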
// packageName searches the `tools` and `services` directories of this repo to
// find the source directory for the named binary. This is used within the
// script to find the README and other files that should be packaged with the
// binary.
func packageName(binName string) string {
targets := []string{
filepath.Join("services", binName),
filepath.Join("tools", binName),
}
var result string
// Note: we do not short circuit this search when we find a valid result so
// that we can panic when multiple results are found. The children of
// /services and /tools should not have name overlap.
for _, t := range targets {
_, err := os.Stat(t)
if os.IsNotExist(err) {
continue
}
if err != nil {
panic(errors.Wrap(err, "stat failed"))
}
if result != "" {
msg := fmt.Sprintf("sourceDir() found multiple results!: binName: %s", binName)
panic(msg)
}
result = t
}
return result
}
func prepareDest(pkg, bin, version, os, arch string) string {
name := fmt.Sprintf("%s-%s-%s-%s", bin, version, os, arch)
dest := filepath.Join("dist", name)
// make destination directories
run("mkdir", "-p", dest)
run("cp", "LICENSE-APACHE.txt", dest)
run("cp", "COPYING", dest)
run("cp", filepath.Join(pkg, "README.md"), dest)
run("cp", filepath.Join(pkg, "CHANGELOG.md"), dest)
return dest
}
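// For example (illustrative arguments): prepareDest("services/horizon", "horizon",
// "v1.0.0", "linux", "amd64") creates and returns "dist/horizon-v1.0.0-linux-amd64",
// populated with the license files plus the package's README.md and CHANGELOG.md.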
// pushdir is a utility function to temporarily change directories. It returns
// a func that can be called to restore the current working directory to the
// state it was in when first calling pushdir.
func pushdir(dir string) func() {
cwd, err := os.Getwd()
if err != nil {
panic(errors.Wrap(err, "getwd failed"))
}
err = os.Chdir(dir)
if err != nil {
panic(errors.Wrap(err, "chdir failed"))
}
return func() {
err := os.Chdir(cwd)
if err != nil {
panic(errors.Wrap(err, "revert dir failed"))
}
}
}
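// Typical usage sketch (illustrative, assuming a "dist" directory exists):
//
//	pop := pushdir("dist")
//	defer pop() // restores the original working directory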
func repoName() string {
if os.Getenv("REPO") != "" {
return os.Getenv("REPO")
}
return "github.com/paydex-core/paydex-go"
}
// run is a utility that executes the provided command, echoing any output. A
// failed command will trigger a panic.
func run(name string, args ...string) {
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
log.Infof("running: %s %s", name, strings.Join(args, " "))
err := cmd.Run()
if err != nil {
panic(err)
}
}
// runOutput is a utility that executes the provided command and returns its
// trimmed output. A failed command will trigger a panic.
func runOutput(name string, args ...string) string {
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
log.Infof("running: %s %s", name, strings.Join(args, " "))
out, err := cmd.Output()
if err != nil {
panic(err)
}
return strings.TrimSpace(string(out))
}
| ["\"TRAVIS_EVENT_TYPE\"", "\"CIRCLE_TAG\"", "\"CIRCLE_TAG\"", "\"REPO\"", "\"REPO\""] | [] | ["TRAVIS_EVENT_TYPE", "REPO", "CIRCLE_TAG"] | [] | ["TRAVIS_EVENT_TYPE", "REPO", "CIRCLE_TAG"] | go | 3 | 0 | |
management-api/apiredis/db.go
|
package apiredis
import (
"context"
"fmt"
"github.com/go-redis/redis/v8"
"os"
)
type RedisDB struct {
client *redis.Client
}
func NewRedisDB() *RedisDB {
host := os.Getenv("REDIS_HOST")
port := os.Getenv("REDIS_PORT")
addr := fmt.Sprintf("%s:%s", host, port)
return &RedisDB{
client: redis.NewClient(&redis.Options{
Addr: addr,
Password: "",
DB: 0,
}),
}
}
func (rd RedisDB) PostRouting(ctx context.Context, apikey, path, forwardURL string) error {
err := rd.client.HSet(ctx, apikey, path, forwardURL).Err()
return err
}
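// Illustrative usage sketch (hypothetical key, path and URL; assumes REDIS_HOST and
// REDIS_PORT are set in the environment):
//
//	db := NewRedisDB()
//	if err := db.PostRouting(context.Background(), "api-key-123", "/v1/users", "http://backend:8080/users"); err != nil {
//		// handle the failed Redis write
//	}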
| ["\"REDIS_HOST\"", "\"REDIS_PORT\""] | [] | ["REDIS_PORT", "REDIS_HOST"] | [] | ["REDIS_PORT", "REDIS_HOST"] | go | 2 | 0 | |
pkg/cli/release.go
|
package cli
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/jimeh/build-emacs-for-macos/pkg/plan"
"github.com/jimeh/build-emacs-for-macos/pkg/release"
"github.com/jimeh/build-emacs-for-macos/pkg/repository"
cli2 "github.com/urfave/cli/v2"
)
type releaseOptions struct {
Plan *plan.Plan
Repository *repository.Repository
Name string
GithubToken string
}
func releaseCmd() *cli2.Command {
tokenDefaultText := ""
if len(os.Getenv("GITHUB_TOKEN")) > 0 {
tokenDefaultText = "***"
}
return &cli2.Command{
Name: "release",
Usage: "manage GitHub releases",
Flags: []cli2.Flag{
&cli2.StringFlag{
Name: "plan",
Usage: "path to build plan YAML file produced by " +
"emacs-builder plan",
Aliases: []string{"p"},
EnvVars: []string{"EMACS_BUILDER_PLAN"},
TakesFile: true,
},
&cli2.StringFlag{
Name: "repository",
Aliases: []string{"repo", "r"},
Usage: "owner/name of GitHub repo to check for release, " +
"ignored if a plan is provided",
EnvVars: []string{"GITHUB_REPOSITORY"},
Value: "jimeh/emacs-builds",
},
&cli2.StringFlag{
Name: "name",
Aliases: []string{"n"},
Usage: "name of release to operate on, ignored if plan " +
"is provided",
},
&cli2.StringFlag{
Name: "github-token",
Usage: "GitHub API Token",
EnvVars: []string{"GITHUB_TOKEN"},
DefaultText: tokenDefaultText,
},
},
Subcommands: []*cli2.Command{
releaseCheckCmd(),
releasePublishCmd(),
releaseBulkCmd(),
},
}
}
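// Illustrative invocation (hypothetical release and asset names; the binary name
// `emacs-builder` is taken from the usage text above):
//
//	GITHUB_TOKEN=... emacs-builder release --repository jimeh/emacs-builds --name Emacs-28.1 check Emacs-28.1.dmg
//
// checks that the named GitHub release exists and has the given asset attached.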
func releaseActionWrapper(
f func(*cli2.Context, *Options, *releaseOptions) error,
) func(*cli2.Context) error {
return actionWrapper(func(c *cli2.Context, opts *Options) error {
rOpts := &releaseOptions{
Name: c.String("name"),
GithubToken: c.String("github-token"),
}
if r := c.String("repository"); r != "" {
var err error
rOpts.Repository, err = repository.NewGitHub(r)
if err != nil {
return err
}
}
if f := c.String("plan"); f != "" {
p, err := plan.Load(f)
if err != nil {
return err
}
rOpts.Plan = p
}
return f(c, opts, rOpts)
})
}
func releaseCheckCmd() *cli2.Command {
return &cli2.Command{
Name: "check",
Usage: "check if a GitHub release exists and has specified " +
"asset files",
ArgsUsage: "[<asset-file> ...]",
Action: releaseActionWrapper(releaseCheckAction),
}
}
func releaseCheckAction(
c *cli2.Context,
opts *Options,
rOpts *releaseOptions,
) error {
rlsOpts := &release.CheckOptions{
Repository: rOpts.Repository,
ReleaseName: rOpts.Name,
AssetFiles: c.Args().Slice(),
GithubToken: rOpts.GithubToken,
}
if rOpts.Plan != nil && rOpts.Plan.Release != nil {
rlsOpts.ReleaseName = rOpts.Plan.Release.Name
}
if rOpts.Plan != nil && rOpts.Plan.Output != nil {
rlsOpts.AssetFiles = []string{rOpts.Plan.Output.DiskImage}
}
return release.Check(c.Context, rlsOpts)
}
func releasePublishCmd() *cli2.Command {
return &cli2.Command{
Name: "publish",
Usage: "publish a GitHub release with specified asset " +
"files",
ArgsUsage: "[<asset-file> ...]",
Flags: []cli2.Flag{
&cli2.StringFlag{
Name: "sha",
Aliases: []string{"s"},
Usage: "git SHA to create release on",
EnvVars: []string{"GITHUB_SHA"},
},
&cli2.StringFlag{
Name: "type",
Aliases: []string{"t"},
Usage: "release type, must be normal, prerelease, or draft",
Value: "normal",
},
&cli2.StringFlag{
Name: "title",
Usage: "release title, will use release name if not " +
"specified",
Value: "",
},
},
Action: releaseActionWrapper(releasePublishAction),
}
}
func releasePublishAction(
c *cli2.Context,
opts *Options,
rOpts *releaseOptions,
) error {
rlsOpts := &release.PublishOptions{
Repository: rOpts.Repository,
CommitRef: c.String("sha"),
ReleaseName: rOpts.Name,
ReleaseTitle: c.String("title"),
AssetFiles: c.Args().Slice(),
GithubToken: rOpts.GithubToken,
}
rlsType := c.String("type")
switch rlsType {
case "draft":
rlsOpts.ReleaseType = release.Draft
case "prerelease":
rlsOpts.ReleaseType = release.Prerelease
case "normal":
rlsOpts.ReleaseType = release.Normal
default:
return fmt.Errorf("invalid --type \"%s\"", rlsType)
}
if rOpts.Plan != nil {
if rOpts.Plan.Release != nil {
rlsOpts.ReleaseName = rOpts.Plan.Release.Name
rlsOpts.ReleaseTitle = rOpts.Plan.Release.Title
if rOpts.Plan.Release.Draft {
rlsOpts.ReleaseType = release.Draft
} else if rOpts.Plan.Release.Prerelease {
rlsOpts.ReleaseType = release.Prerelease
}
}
if rOpts.Plan.Output != nil {
rlsOpts.AssetFiles = []string{
filepath.Join(
rOpts.Plan.Output.Directory,
rOpts.Plan.Output.DiskImage,
),
}
}
}
return release.Publish(c.Context, rlsOpts)
}
func releaseBulkCmd() *cli2.Command {
return &cli2.Command{
Name: "bulk",
Usage: "bulk modify GitHub releases",
ArgsUsage: "",
Flags: []cli2.Flag{
&cli2.StringFlag{
Name: "name",
Usage: "regexp pattern matching release names to modify",
},
&cli2.StringFlag{
Name: "prerelease",
Usage: "change prerelease flag, must be \"true\" or " +
"\"false\", otherwise prerelease value is not changed",
},
&cli2.BoolFlag{
Name: "dry-run",
Usage: "do not perform any changes",
},
},
Action: releaseActionWrapper(releaseBulkAction),
}
}
func releaseBulkAction(
c *cli2.Context,
opts *Options,
rOpts *releaseOptions,
) error {
bulkOpts := &release.BulkOptions{
Repository: rOpts.Repository,
NamePattern: c.String("name"),
DryRun: c.Bool("dry-run"),
GithubToken: rOpts.GithubToken,
}
switch c.String("prerelease") {
case "true":
v := true
bulkOpts.Prerelease = &v
case "false":
v := false
bulkOpts.Prerelease = &v
case "":
default:
return errors.New(
"--prerelease by me \"true\" or \"false\" when specified",
)
}
return release.Bulk(c.Context, bulkOpts)
}
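// Illustrative invocation (hypothetical pattern): `emacs-builder release bulk
// --name '^Emacs-27' --prerelease true --dry-run` would mark all releases whose names
// match the pattern as prereleases, except that --dry-run prevents any change from
// actually being applied.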
| ["\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
jfrog-cli/xray/commands/offlineupdate.go
|
package commands
import (
"encoding/json"
"errors"
"fmt"
"github.com/jfrog/jfrog-cli-go/jfrog-client/errors/httperrors"
"github.com/jfrog/jfrog-cli-go/jfrog-client/utils/errorutils"
"github.com/jfrog/jfrog-cli-go/jfrog-client/utils/io/fileutils"
"github.com/jfrog/jfrog-cli-go/jfrog-client/utils/io/httputils"
"github.com/jfrog/jfrog-cli-go/jfrog-client/utils/log"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"github.com/jfrog/jfrog-cli-go/jfrog-client/utils"
)
const (
Vulnerability = "__vuln"
Component = "__comp"
JxrayDefaultBaseUrl = "https://jxray.jfrog.io/"
JxrayApiBundles = "api/v1/updates/bundles"
JxrayApiOnboarding = "api/v1/updates/onboarding"
)
func OfflineUpdate(flags *OfflineUpdatesFlags) error {
updatesUrl, err := buildUpdatesUrl(flags)
if err != nil {
return err
}
vulnerabilities, components, lastUpdate, err := getFilesList(updatesUrl, flags)
if err != nil {
return err
}
zipSuffix := "_" + strconv.FormatInt(lastUpdate, 10)
xrayTempDir, err := getXrayTempDir()
if err != nil {
return err
}
if len(vulnerabilities) > 0 {
log.Info("Downloading vulnerabilities...")
err := saveData(xrayTempDir, "vuln", zipSuffix, vulnerabilities)
if err != nil {
return err
}
} else {
log.Info("There are no new vulnerabilities.")
}
if len(components) > 0 {
log.Info("Downloading components...")
err := saveData(xrayTempDir, "comp", zipSuffix, components)
if err != nil {
return err
}
} else {
log.Info("There are no new components.")
}
return nil
}
func getUpdatesBaseUrl(datesSpecified bool) string {
jxRayBaseUrl := os.Getenv("JFROG_CLI_JXRAY_BASE_URL")
jxRayBaseUrl = utils.AddTrailingSlashIfNeeded(jxRayBaseUrl)
if jxRayBaseUrl == "" {
jxRayBaseUrl = JxrayDefaultBaseUrl
}
if datesSpecified {
return jxRayBaseUrl + JxrayApiBundles
}
return jxRayBaseUrl + JxrayApiOnboarding
}
func buildUpdatesUrl(flags *OfflineUpdatesFlags) (string, error) {
var queryParams string
datesSpecified := flags.From > 0 && flags.To > 0
if datesSpecified {
if err := validateDates(flags.From, flags.To); err != nil {
return "", err
}
queryParams += fmt.Sprintf("from=%v&to=%v", flags.From, flags.To)
}
if flags.Version != "" {
if queryParams != "" {
queryParams += "&"
}
queryParams += fmt.Sprintf("version=%v", flags.Version)
}
url := getUpdatesBaseUrl(datesSpecified)
if queryParams != "" {
url += "?" + queryParams
}
return url, nil
}
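// For example (illustrative flag values): with From=1500000000, To=1500100000 and
// Version="2.1" this returns
// "https://jxray.jfrog.io/api/v1/updates/bundles?from=1500000000&to=1500100000&version=2.1",
// unless JFROG_CLI_JXRAY_BASE_URL overrides the default base URL.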
func validateDates(from, to int64) error {
if from < 0 || to < 0 {
err := errors.New("Invalid dates")
return errorutils.CheckError(err)
}
if from > to {
err := errors.New("Invalid dates range.")
return errorutils.CheckError(err)
}
return nil
}
func getXrayTempDir() (string, error) {
tempDir := os.TempDir()
xrayDir := tempDir + "/jfrog/xray/"
if err := os.MkdirAll(xrayDir, 0777); err != nil {
return "", errorutils.CheckError(err)
}
return xrayDir, nil
}
func saveData(xrayTmpDir, filesPrefix, zipSuffix string, urlsList []string) error {
dataDir, err := ioutil.TempDir(xrayTmpDir, filesPrefix)
if err != nil {
return err
}
defer func() {
if cerr := os.RemoveAll(dataDir); cerr != nil && err == nil {
err = cerr
}
}()
for _, url := range urlsList {
fileName, err := createXrayFileNameFromUrl(url)
if err != nil {
return err
}
log.Info("Downloading", url)
_, err = httputils.DownloadFile(url, dataDir, fileName, httputils.HttpClientDetails{})
if err != nil {
return err
}
}
log.Info("Zipping files.")
err = fileutils.ZipFolderFiles(dataDir, filesPrefix+zipSuffix+".zip")
if err != nil {
return err
}
log.Info("Done zipping files.")
return nil
}
func createXrayFileNameFromUrl(url string) (fileName string, err error) {
originalUrl := url
index := strings.Index(url, "?")
if index != -1 {
url = url[:index]
}
index = strings.Index(url, ";")
if index != -1 {
url = url[:index]
}
sections := strings.Split(url, "/")
length := len(sections)
if length < 2 {
err = errorutils.CheckError(errors.New(fmt.Sprintf("Unexpected URL format: %s", originalUrl)))
return
}
fileName = fmt.Sprintf("%s__%s", sections[length-2], sections[length-1])
return
}
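// For example (illustrative URL): "https://example.com/bundles/__vuln/file1.zip?token=abc"
// is trimmed at the query string and yields the file name "__vuln__file1.zip".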
func getFilesList(updatesUrl string, flags *OfflineUpdatesFlags) (vulnerabilities []string, components []string, lastUpdate int64, err error) {
log.Info("Getting updates...")
headers := make(map[string]string)
headers["X-Xray-License"] = flags.License
httpClientDetails := httputils.HttpClientDetails{
Headers: headers,
}
resp, body, _, err := httputils.SendGet(updatesUrl, false, httpClientDetails)
if err != nil {
errorutils.CheckError(err)
return
}
if err = httperrors.CheckResponseStatus(resp, body, http.StatusOK); err != nil {
errorutils.CheckError(errors.New("Response: " + err.Error()))
return
}
var urls FilesList
err = json.Unmarshal(body, &urls)
if err != nil {
err = errorutils.CheckError(errors.New("Failed parsing json response: " + string(body)))
return
}
for _, v := range urls.Urls {
if strings.Contains(v, Vulnerability) {
vulnerabilities = append(vulnerabilities, v)
} else if strings.Contains(v, Component) {
components = append(components, v)
}
}
lastUpdate = urls.Last_update
return
}
type OfflineUpdatesFlags struct {
License string
From int64
To int64
Version string
}
type FilesList struct {
Last_update int64
Urls []string
}
| ["\"JFROG_CLI_JXRAY_BASE_URL\""] | [] | ["JFROG_CLI_JXRAY_BASE_URL"] | [] | ["JFROG_CLI_JXRAY_BASE_URL"] | go | 1 | 0 | |
cmd/kops/integration_test.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"sort"
"strings"
"testing"
"time"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/jsonutils"
"k8s.io/kops/pkg/testutils"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"github.com/ghodss/yaml"
"golang.org/x/crypto/ssh"
)
// updateClusterTestBase is added automatically to the srcDir on all
// tests using runTest, including runTestAWS, runTestGCE
const updateClusterTestBase = "../../tests/integration/update_cluster/"
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true)
runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true)
}
// TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com
// --zones us-test1-a,us-test1-b,us-test1-c --master-count=3
func TestHighAvailabilityGCE(t *testing.T) {
runTestGCE(t, "ha-gce.example.com", "ha_gce", "v1alpha2", false, 3)
}
// TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
func TestComplex(t *testing.T) {
runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true)
}
// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimalCloudformation(t *testing.T) {
runTestCloudformation(t, "minimal.example.com", "minimal-cloudformation", "v1alpha2", false, nil)
}
// TestExistingIAMCloudformation runs the test with existing IAM instance profiles, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestExistingIAMCloudformation(t *testing.T) {
lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"}
runTestCloudformation(t, "minimal.example.com", "existing_iam_cloudformation", "v1alpha2", false, lifecycleOverrides)
}
// TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap.
func TestAdditionalUserData(t *testing.T) {
runTestCloudformation(t, "additionaluserdata.example.com", "additional_user-data", "v1alpha2", false, nil)
}
// TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group
func TestBastionAdditionalUserData(t *testing.T) {
runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true)
}
// TestMinimal_141 runs the test on a configuration from 1.4.1 release
func TestMinimal_141(t *testing.T) {
runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true)
}
// TestPrivateWeave runs the test on a configuration with private topology, weave networking
func TestPrivateWeave(t *testing.T) {
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true)
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true)
}
// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true)
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true)
}
// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true)
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true)
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true)
}
// TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking
func TestPrivateKopeio(t *testing.T) {
runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true)
}
// TestPrivateSharedSubnet runs the test on a configuration with private topology & shared subnets
func TestPrivateSharedSubnet(t *testing.T) {
runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true)
}
// TestPrivateDns1 runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true)
}
// TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc
func TestPrivateDns2(t *testing.T) {
runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true)
}
// TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
func TestSharedSubnet(t *testing.T) {
runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true)
}
// TestSharedVPC runs the test on a configuration with a shared VPC
func TestSharedVPC(t *testing.T) {
runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true)
}
// TestExistingIAM runs the test on a configuration with existing IAM instance profiles
func TestExistingIAM(t *testing.T) {
runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false)
}
// TestAdditionalCIDR runs the test on a configuration with a shared VPC
func TestAdditionalCIDR(t *testing.T) {
runTestCloudformation(t, "additionalcidr.example.com", "additional_cidr", "v1alpha2", false, nil)
}
// TestPhaseNetwork tests the output of tf for the network phase
func TestPhaseNetwork(t *testing.T) {
runTestPhase(t, "lifecyclephases.example.com", "lifecycle_phases", "v1alpha2", true, 1, cloudup.PhaseNetwork)
}
func TestExternalLoadBalancer(t *testing.T) {
runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true)
runTestCloudformation(t, "externallb.example.com", "externallb", "v1alpha2", false, nil)
}
// TestPhaseIAM tests the output of tf for the iam phase
func TestPhaseIAM(t *testing.T) {
t.Skip("unable to test w/o allowing failed validation")
runTestPhase(t, "lifecyclephases.example.com", "lifecycle_phases", "v1alpha2", true, 1, cloudup.PhaseSecurity)
}
// TestPhaseCluster tests the output of tf for the cluster phase
func TestPhaseCluster(t *testing.T) {
// TODO fix tf for phase, and allow override on validation
t.Skip("unable to test w/o allowing failed validation")
runTestPhase(t, "lifecyclephases.example.com", "lifecycle_phases", "v1alpha2", true, 1, cloudup.PhaseCluster)
}
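// runTest creates the cluster from in-<version>.yaml, adds the admin SSH public key secret,
// runs a terraform-target update, and compares the generated kubernetes.tf and any data files
// against the expected output under srcDir.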
func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, phase *cloudup.Phase, lifecycleOverrides []string) {
var stdout bytes.Buffer
srcDir = updateClusterTestBase + srcDir
inputYAML := "in-" + version + ".yaml"
testDataTFPath := "kubernetes.tf"
actualTFPath := "kubernetes.tf"
if tfFileName != "" {
testDataTFPath = tfFileName
}
factoryOptions := &util.FactoryOptions{}
factoryOptions.RegistryPath = "memfs://tests"
factory := util.NewFactory(factoryOptions)
{
options := &CreateOptions{}
options.Filenames = []string{path.Join(srcDir, inputYAML)}
err := RunCreate(factory, &stdout, options)
if err != nil {
t.Fatalf("error running %q create: %v", inputYAML, err)
}
}
{
options := &CreateSecretPublickeyOptions{}
options.ClusterName = clusterName
options.Name = "admin"
options.PublicKeyPath = path.Join(srcDir, "id_rsa.pub")
err := RunCreateSecretPublicKey(factory, &stdout, options)
if err != nil {
t.Fatalf("error running %q create: %v", inputYAML, err)
}
}
{
options := &UpdateClusterOptions{}
options.InitDefaults()
options.Target = "terraform"
options.OutDir = path.Join(h.TempDir, "out")
options.RunTasksOptions.MaxTaskDuration = 30 * time.Second
if phase != nil {
options.Phase = string(*phase)
}
// We don't test it here, and it adds a dependency on kubectl
options.CreateKubecfg = false
options.LifecycleOverrides = lifecycleOverrides
_, err := RunUpdateCluster(factory, clusterName, &stdout, options)
if err != nil {
t.Fatalf("error running update cluster %q: %v", clusterName, err)
}
}
// Compare main files
{
files, err := ioutil.ReadDir(path.Join(h.TempDir, "out"))
if err != nil {
t.Fatalf("failed to read dir: %v", err)
}
var fileNames []string
for _, f := range files {
fileNames = append(fileNames, f.Name())
}
sort.Strings(fileNames)
actualFilenames := strings.Join(fileNames, ",")
expectedFilenames := "kubernetes.tf"
if len(expectedDataFilenames) > 0 {
expectedFilenames = "data,kubernetes.tf"
}
if actualFilenames != expectedFilenames {
t.Fatalf("unexpected files. actual=%q, expected=%q, test=%q", actualFilenames, expectedFilenames, testDataTFPath)
}
actualTF, err := ioutil.ReadFile(path.Join(h.TempDir, "out", actualTFPath))
if err != nil {
t.Fatalf("unexpected error reading actual terraform output: %v", err)
}
expectedTF, err := ioutil.ReadFile(path.Join(srcDir, testDataTFPath))
if err != nil {
t.Fatalf("unexpected error reading expected terraform output: %v", err)
}
expectedTF = bytes.Replace(expectedTF, []byte("\r\n"), []byte("\n"), -1)
if !bytes.Equal(actualTF, expectedTF) {
diffString := diff.FormatDiff(string(expectedTF), string(actualTF))
t.Logf("diff:\n%s\n", diffString)
if os.Getenv("HACK_UPDATE_EXPECTED_IN_PLACE") != "" {
fp := path.Join(srcDir, testDataTFPath)
t.Logf("HACK_UPDATE_EXPECTED_IN_PLACE: writing expected output %s", fp)
if err := ioutil.WriteFile(fp, actualTF, 0644); err != nil {
t.Errorf("error writing terraform output: %v", err)
}
t.Errorf("terraform output differed from expected")
return // Avoid Fatalf as we want to keep going and update all files
}
t.Fatalf("terraform output differed from expected")
}
}
// Compare data files if they are provided
if len(expectedDataFilenames) > 0 {
actualDataPath := path.Join(h.TempDir, "out", "data")
files, err := ioutil.ReadDir(actualDataPath)
if err != nil {
t.Fatalf("failed to read data dir: %v", err)
}
var actualDataFilenames []string
for _, f := range files {
actualDataFilenames = append(actualDataFilenames, f.Name())
}
sort.Strings(expectedDataFilenames)
if !reflect.DeepEqual(actualDataFilenames, expectedDataFilenames) {
t.Fatalf("unexpected data files. actual=%q, expected=%q", actualDataFilenames, expectedDataFilenames)
}
// Some tests might provide _some_ tf data files (not necessarily all that
// are actually produced), validate that the provided expected data file
// contents match actual data file content
expectedDataPath := path.Join(srcDir, "data")
if _, err := os.Stat(expectedDataPath); err == nil {
expectedDataFiles, err := ioutil.ReadDir(expectedDataPath)
if err != nil {
t.Fatalf("failed to read expected data dir: %v", err)
}
for _, expectedDataFile := range expectedDataFiles {
dataFileName := expectedDataFile.Name()
expectedDataContent, err :=
ioutil.ReadFile(path.Join(expectedDataPath, dataFileName))
if err != nil {
t.Fatalf("failed to read expected data file: %v", err)
}
actualDataContent, err :=
ioutil.ReadFile(path.Join(actualDataPath, dataFileName))
if err != nil {
t.Fatalf("failed to read actual data file: %v", err)
}
if string(expectedDataContent) != string(actualDataContent) {
t.Fatalf(
"actual data file (%s) did not match the content of expected data file (%s). "+
"NOTE: If outputs seem identical, check for end-of-line differences, "+
"especially if the file is in multipart MIME format!"+
"\nBEGIN_ACTUAL:\n%s\nEND_ACTUAL\nBEGIN_EXPECTED:\n%s\nEND_EXPECTED",
path.Join(actualDataPath, dataFileName),
path.Join(expectedDataPath, dataFileName),
actualDataContent,
expectedDataContent,
)
}
}
}
}
}
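// runTestAWS runs the integration test against a mocked AWS environment; the expected data
// filenames are derived from the cluster name, the number of master zones, and whether IAM
// policies and bastion resources are expected.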
func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool) {
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.SetupMockAWS()
expectedFilenames := []string{
"aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
"aws_launch_configuration_nodes." + clusterName + "_user_data",
}
for i := 0; i < zones; i++ {
zone := "us-test-1" + string([]byte{byte('a') + byte(i)})
s := "aws_launch_configuration_master-" + zone + ".masters." + clusterName + "_user_data"
expectedFilenames = append(expectedFilenames, s)
}
lifecycleOverrides := []string{}
if !expectPolicies {
lifecycleOverrides = append(lifecycleOverrides, "IAMRole=Ignore")
lifecycleOverrides = append(lifecycleOverrides, "IAMRolePolicy=Ignore")
lifecycleOverrides = append(lifecycleOverrides, "IAMInstanceProfileRole=Ignore")
}
if expectPolicies {
expectedFilenames = append(expectedFilenames, []string{
"aws_iam_role_masters." + clusterName + "_policy",
"aws_iam_role_nodes." + clusterName + "_policy",
"aws_iam_role_policy_masters." + clusterName + "_policy",
"aws_iam_role_policy_nodes." + clusterName + "_policy",
}...)
if private {
expectedFilenames = append(expectedFilenames, []string{
"aws_iam_role_bastions." + clusterName + "_policy",
"aws_iam_role_policy_bastions." + clusterName + "_policy",
// bastions usually don't have any userdata
// "aws_launch_configuration_bastions." + clusterName + "_user_data",
}...)
}
}
// Special case that tests a bastion with user-data
if srcDir == "bastionadditional_user-data" {
expectedFilenames = append(expectedFilenames, "aws_launch_configuration_bastion."+clusterName+"_user_data")
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, lifecycleOverrides)
}
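// runTestPhase runs the integration test for a single cloudup phase against mocked AWS,
// expecting a phase-specific terraform file (<phase>-kubernetes.tf) and only the data files
// that phase produces.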
func runTestPhase(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, phase cloudup.Phase) {
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.SetupMockAWS()
phaseName := string(phase)
if phaseName == "" {
t.Fatalf("phase must be set")
}
tfFileName := phaseName + "-kubernetes.tf"
expectedFilenames := []string{}
if phase == cloudup.PhaseSecurity {
expectedFilenames = []string{
"aws_iam_role_masters." + clusterName + "_policy",
"aws_iam_role_nodes." + clusterName + "_policy",
"aws_iam_role_policy_masters." + clusterName + "_policy",
"aws_iam_role_policy_nodes." + clusterName + "_policy",
"aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
}
if private {
expectedFilenames = append(expectedFilenames, []string{
"aws_iam_role_bastions." + clusterName + "_policy",
"aws_iam_role_policy_bastions." + clusterName + "_policy",
// bastions don't have any userdata
// "aws_launch_configuration_bastions." + clusterName + "_user_data",
}...)
}
} else if phase == cloudup.PhaseCluster {
expectedFilenames = []string{
"aws_launch_configuration_nodes." + clusterName + "_user_data",
}
for i := 0; i < zones; i++ {
zone := "us-test-1" + string([]byte{byte('a') + byte(i)})
s := "aws_launch_configuration_master-" + zone + ".masters." + clusterName + "_user_data"
expectedFilenames = append(expectedFilenames, s)
}
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, &phase, nil)
}
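// runTestGCE runs the integration test against a mocked GCE project (behind the AlphaAllowGCE
// feature flag), expecting instance-template metadata files for the nodes and for each master zone.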
func runTestGCE(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
featureflag.ParseFlags("+AlphaAllowGCE")
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.SetupMockGCE()
expectedFilenames := []string{
"google_compute_instance_template_nodes-" + gce.SafeClusterName(clusterName) + "_metadata_cluster-name",
"google_compute_instance_template_nodes-" + gce.SafeClusterName(clusterName) + "_metadata_startup-script",
"google_compute_instance_template_nodes-" + gce.SafeClusterName(clusterName) + "_metadata_ssh-keys",
}
for i := 0; i < zones; i++ {
zone := "us-test1-" + string([]byte{byte('a') + byte(i)})
prefix := "google_compute_instance_template_master-" + zone + "-" + gce.SafeClusterName(clusterName) + "_metadata_"
expectedFilenames = append(expectedFilenames, prefix+"cluster-name")
expectedFilenames = append(expectedFilenames, prefix+"startup-script")
expectedFilenames = append(expectedFilenames, prefix+"ssh-keys")
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, nil)
}
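// runTestCloudformation performs the same create/update flow with a cloudformation target and
// compares the generated kubernetes.json; UserData blobs are extracted and compared separately
// against the .extracted.yaml golden file.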
func runTestCloudformation(t *testing.T, clusterName string, srcDir string, version string, private bool, lifecycleOverrides []string) {
srcDir = updateClusterTestBase + srcDir
var stdout bytes.Buffer
inputYAML := "in-" + version + ".yaml"
expectedCfPath := "cloudformation.json"
factoryOptions := &util.FactoryOptions{}
factoryOptions.RegistryPath = "memfs://tests"
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.SetupMockAWS()
factory := util.NewFactory(factoryOptions)
{
options := &CreateOptions{}
options.Filenames = []string{path.Join(srcDir, inputYAML)}
err := RunCreate(factory, &stdout, options)
if err != nil {
t.Fatalf("error running %q create: %v", inputYAML, err)
}
}
{
options := &CreateSecretPublickeyOptions{}
options.ClusterName = clusterName
options.Name = "admin"
options.PublicKeyPath = path.Join(srcDir, "id_rsa.pub")
err := RunCreateSecretPublicKey(factory, &stdout, options)
if err != nil {
t.Fatalf("error running %q create: %v", inputYAML, err)
}
}
{
options := &UpdateClusterOptions{}
options.InitDefaults()
options.Target = "cloudformation"
options.OutDir = path.Join(h.TempDir, "out")
options.RunTasksOptions.MaxTaskDuration = 30 * time.Second
// We don't test it here, and it adds a dependency on kubectl
options.CreateKubecfg = false
options.LifecycleOverrides = lifecycleOverrides
_, err := RunUpdateCluster(factory, clusterName, &stdout, options)
if err != nil {
t.Fatalf("error running update cluster %q: %v", clusterName, err)
}
}
// Compare main files
{
files, err := ioutil.ReadDir(path.Join(h.TempDir, "out"))
if err != nil {
t.Fatalf("failed to read dir: %v", err)
}
var fileNames []string
for _, f := range files {
fileNames = append(fileNames, f.Name())
}
sort.Strings(fileNames)
actualFilenames := strings.Join(fileNames, ",")
expectedFilenames := "kubernetes.json"
if actualFilenames != expectedFilenames {
t.Fatalf("unexpected files. actual=%q, expected=%q", actualFilenames, expectedFilenames)
}
actualPath := path.Join(h.TempDir, "out", "kubernetes.json")
actualCF, err := ioutil.ReadFile(actualPath)
if err != nil {
t.Fatalf("unexpected error reading actual cloudformation output: %v", err)
}
expectedCF, err := ioutil.ReadFile(path.Join(srcDir, expectedCfPath))
if err != nil {
t.Fatalf("unexpected error reading expected cloudformation output: %v", err)
}
// Expand out the UserData base64 blob, as otherwise testing is painful
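// The JSON is re-emitted token by token; every string value whose path ends in .UserData is
// base64-decoded into the extracted map and replaced with the placeholder "extracted", which
// keeps the main document diff readable.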
extracted := make(map[string]string)
var buf bytes.Buffer
out := jsonutils.NewJSONStreamWriter(&buf)
in := json.NewDecoder(bytes.NewReader(actualCF))
for {
token, err := in.Token()
if err != nil {
if err == io.EOF {
break
} else {
t.Fatalf("unexpected error parsing cloudformation output: %v", err)
}
}
if strings.HasSuffix(out.Path(), ".UserData") {
if s, ok := token.(string); ok {
vBytes, err := base64.StdEncoding.DecodeString(s)
if err != nil {
t.Fatalf("error decoding UserData: %v", err)
} else {
extracted[out.Path()] = string(vBytes)
token = json.Token("extracted")
}
}
}
if err := out.WriteToken(token); err != nil {
t.Fatalf("error writing json: %v", err)
}
}
actualCF = buf.Bytes()
expectedCFTrimmed := strings.Replace(strings.TrimSpace(string(expectedCF)), "\r\n", "\n", -1)
actualCFTrimmed := strings.TrimSpace(string(actualCF))
if actualCFTrimmed != expectedCFTrimmed {
diffString := diff.FormatDiff(expectedCFTrimmed, actualCFTrimmed)
t.Logf("diff:\n%s\n", diffString)
if os.Getenv("KEEP_TEMP_DIR") == "" {
t.Logf("(hint: setting KEEP_TEMP_DIR will preserve test output")
} else {
t.Logf("actual terraform output in %s", actualPath)
}
t.Fatalf("cloudformation output differed from expected. Test file: %s", path.Join(srcDir, expectedCfPath))
}
fp := path.Join(srcDir, expectedCfPath+".extracted.yaml")
expectedExtracted, err := ioutil.ReadFile(fp)
if err != nil {
t.Fatalf("unexpected error reading expected extracted cloudformation output: %v", err)
}
expected := make(map[string]string)
err = yaml.Unmarshal(expectedExtracted, &expected)
if err != nil {
t.Fatalf("unexpected error unmarshal expected extracted cloudformation output: %v", err)
}
if len(extracted) != len(expected) {
t.Fatalf("error differed number of cloudformation in expected and extracted: %v", err)
}
actual := make(map[string]string)
for key, expectedValue := range expected {
extractedValue, ok := extracted[key]
if !ok {
t.Fatalf("unexpected error expected cloudformation not found for k: %v", key)
}
actual[key] = extractedValue
// Strip carriage return as expectedValue is stored in a yaml string literal
// and golang will automatically strip CR from any string literal
extractedValueTrimmed := strings.Replace(extractedValue, "\r", "", -1)
if expectedValue != extractedValueTrimmed {
if os.Getenv("HACK_UPDATE_EXPECTED_IN_PLACE") != "" {
t.Errorf("cloudformation output differed from expected")
continue // Avoid Fatalf as we want to keep going and update all files
}
diffString := diff.FormatDiff(expectedValue, extractedValueTrimmed)
t.Logf("diff for key %s:\n%s\n\n\n\n\n\n", key, diffString)
t.Fatalf("cloudformation output differed from expected. Test file: %s", path.Join(srcDir, expectedCfPath+".extracted.yaml"))
}
}
if os.Getenv("HACK_UPDATE_EXPECTED_IN_PLACE") != "" {
t.Logf("HACK_UPDATE_EXPECTED_IN_PLACE: writing expected output %s", fp)
b, err := yaml.Marshal(actual)
if err != nil {
t.Errorf("error serializing cloudformation output: %v", err)
}
if err := ioutil.WriteFile(fp, b, 0644); err != nil {
t.Errorf("error writing cloudformation output: %v", err)
}
}
}
}
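// MakeSSHKeyPair generates a 1024-bit RSA key pair (test-only strength) and writes the
// PEM-encoded private key and the authorized_keys-format public key to the given paths.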
func MakeSSHKeyPair(publicKeyPath string, privateKeyPath string) error {
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
return err
}
var privateKeyBytes bytes.Buffer
privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
if err := pem.Encode(&privateKeyBytes, privateKeyPEM); err != nil {
return err
}
if err := ioutil.WriteFile(privateKeyPath, privateKeyBytes.Bytes(), os.FileMode(0700)); err != nil {
return err
}
publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
if err != nil {
return err
}
publicKeyBytes := ssh.MarshalAuthorizedKey(publicKey)
if err := ioutil.WriteFile(publicKeyPath, publicKeyBytes, os.FileMode(0744)); err != nil {
return err
}
return nil
}
|
["\"HACK_UPDATE_EXPECTED_IN_PLACE\"", "\"KEEP_TEMP_DIR\"", "\"HACK_UPDATE_EXPECTED_IN_PLACE\"", "\"HACK_UPDATE_EXPECTED_IN_PLACE\""] |
[] |
["KEEP_TEMP_DIR", "HACK_UPDATE_EXPECTED_IN_PLACE"] |
[]
|
["KEEP_TEMP_DIR", "HACK_UPDATE_EXPECTED_IN_PLACE"]
|
go
| 2 | 0 |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_resource_type.py
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.7.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ResourceType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
INT = "int"
FLOAT = "float"
allowable_values = [INT, FLOAT] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""V1ResourceType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceType):
return True
return self.to_dict() != other.to_dict()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
upup/pkg/fi/cloudup/apply_cluster.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"bytes"
"context"
"fmt"
"net/url"
"os"
"path"
"strings"
"github.com/blang/semver/v4"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/acls"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/alimodel"
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
"k8s.io/kops/pkg/model/components/kubeapiserver"
"k8s.io/kops/pkg/model/domodel"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/pkg/model/openstackmodel"
"k8s.io/kops/pkg/model/spotinstmodel"
"k8s.io/kops/pkg/resources/digitalocean"
"k8s.io/kops/pkg/templates"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/vfs"
)
const (
starline = "*********************************************************************************"
)
var (
// AlphaAllowDO is a feature flag that gates DigitalOcean support while it is alpha
AlphaAllowDO = featureflag.New("AlphaAllowDO", featureflag.Bool(false))
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// OldestSupportedKubernetesVersion is the oldest kubernetes version that is supported in Kops
OldestSupportedKubernetesVersion = "1.11.0"
// OldestRecommendedKubernetesVersion is the oldest kubernetes version that is not deprecated in Kops
OldestRecommendedKubernetesVersion = "1.13.0"
)
type ApplyClusterCmd struct {
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
// NodeUpSource is the location from which we download nodeup
NodeUpSource map[architectures.Architecture]string
// NodeUpHash is the sha hash
NodeUpHash map[architectures.Architecture]string
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
TargetName string
// Target is the fi.Target we will operate against
Target fi.Target
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
Assets map[architectures.Architecture][]*MirroredAsset
Clientset simple.Clientset
// DryRun is true if this is only a dry run
DryRun bool
// AllowKopsDowngrade permits applying with a kops version older than what was last used to apply to the cluster.
AllowKopsDowngrade bool
// RunTasksOptions defines parameters for task execution, e.g. retry interval
RunTasksOptions *fi.RunTasksOptions
// The channel we are using
channel *kops.Channel
// Phase can be set to a Phase to run the specific subset of tasks, if we don't want to run everything
Phase Phase
// LifecycleOverrides is passed in to override the lifecycle for one or more tasks.
// The key value is the task name such as InternetGateway and the value is the fi.Lifecycle
// that is re-mapped.
LifecycleOverrides map[string]fi.Lifecycle
// TaskMap is the map of tasks that we built (output)
TaskMap map[string]fi.Task
}
func (c *ApplyClusterCmd) Run(ctx context.Context) error {
if c.InstanceGroups == nil {
list, err := c.Clientset.InstanceGroupsFor(c.Cluster).List(ctx, metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*kops.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
c.InstanceGroups = instanceGroups
}
for _, ig := range c.InstanceGroups {
// Try to guess the path for additional third party volume plugins in Flatcar
image := strings.ToLower(ig.Spec.Image)
if strings.Contains(image, "flatcar") {
if c.Cluster.Spec.Kubelet == nil {
c.Cluster.Spec.Kubelet = &kops.KubeletConfigSpec{}
}
if c.Cluster.Spec.Kubelet.VolumePluginDirectory == "" {
c.Cluster.Spec.Kubelet.VolumePluginDirectory = "/var/lib/kubelet/volumeplugins/"
}
}
}
modelStore, err := findModelStore()
if err != nil {
return err
}
channel, err := ChannelForCluster(c.Cluster)
if err != nil {
klog.Warningf("%v", err)
}
c.channel = channel
stageAssetsLifecycle := fi.LifecycleSync
securityLifecycle := fi.LifecycleSync
networkLifecycle := fi.LifecycleSync
clusterLifecycle := fi.LifecycleSync
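// Map the selected phase onto lifecycles for the four task groups: tasks outside the
// requested phase are either ignored or only checked against existing resources.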
switch c.Phase {
case Phase(""):
// Everything ... the default
// until we implement finding assets we need to Ignore them
stageAssetsLifecycle = fi.LifecycleIgnore
case PhaseStageAssets:
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseNetwork:
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseSecurity:
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
clusterLifecycle = fi.LifecycleIgnore
case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleExistsAndWarnIfChanges
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
} else {
stageAssetsLifecycle = fi.LifecycleIgnore
networkLifecycle = fi.LifecycleExistsAndValidates
securityLifecycle = fi.LifecycleExistsAndValidates
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}
// This is kind of a hack; we need to move phases out of fi. Using Phase here directly
// would introduce a circular Go dependency.
phase := string(c.Phase)
assetBuilder := assets.NewAssetBuilder(c.Cluster, phase)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}
err = c.validateKopsVersion()
if err != nil {
return err
}
err = c.validateKubernetesVersion()
if err != nil {
return err
}
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
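// Unless AllowKopsDowngrade is set, refuse to apply if the state store records that a
// newer kops version last updated this cluster.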
if !c.AllowKopsDowngrade {
kopsVersionUpdatedBytes, err := configBase.Join(registry.PathKopsVersionUpdated).ReadFile()
if err == nil {
kopsVersionUpdated := strings.TrimSpace(string(kopsVersionUpdatedBytes))
version, err := semver.Parse(kopsVersionUpdated)
if err != nil {
return fmt.Errorf("error parsing last kops version updated: %v", err)
}
if version.GT(semver.MustParse(kopsbase.Version)) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("The cluster was last updated by kops version %s\n", kopsVersionUpdated)
fmt.Printf("To permit updating by the older version %s, run with the --allow-kops-downgrade flag\n", kopsbase.Version)
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kops version older than last used to update the cluster")
}
} else if err != os.ErrNotExist {
return fmt.Errorf("error reading last kops version used to update: %v", err)
}
}
cloud, err := BuildCloud(cluster)
if err != nil {
return err
}
err = validation.DeepValidate(c.Cluster, c.InstanceGroups, true, cloud)
if err != nil {
return err
}
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion not set")
}
if cluster.Spec.DNSZone == "" && !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
return fmt.Errorf("DNSZone not set")
}
l := &Loader{}
l.Init()
l.Cluster = c.Cluster
keyStore, err := c.Clientset.KeyStore(cluster)
if err != nil {
return err
}
sshCredentialStore, err := c.Clientset.SSHCredentialStore(cluster)
if err != nil {
return err
}
secretStore, err := c.Clientset.SecretStore(cluster)
if err != nil {
return err
}
// Normalize k8s version
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
versionWithoutV = strings.TrimPrefix(versionWithoutV, "v")
if cluster.Spec.KubernetesVersion != versionWithoutV {
klog.Warningf("Normalizing kubernetes version: %q -> %q", cluster.Spec.KubernetesVersion, versionWithoutV)
cluster.Spec.KubernetesVersion = versionWithoutV
}
// check if we should recommend turning off anonymousAuth
{
// we do a check here because modifying the kubelet object messes with the output
warn := false
if cluster.Spec.Kubelet == nil {
warn = true
} else if cluster.Spec.Kubelet.AnonymousAuth == nil {
warn = true
}
if warn {
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
fmt.Println("Kubelet anonymousAuth is currently turned on. This allows RBAC escalation and remote code execution possibilities.")
fmt.Println("It is highly recommended you turn it off by setting 'spec.kubelet.anonymousAuth' to 'false' via 'kops edit cluster'")
fmt.Println("")
fmt.Println("See https://kops.sigs.k8s.io/security/#kubelet-api")
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
}
}
if err := c.addFileAssets(assetBuilder); err != nil {
return err
}
// Only setup transfer of kops assets if using a FileRepository
if c.Cluster.Spec.Assets != nil && c.Cluster.Spec.Assets.FileRepository != nil {
if err := SetKopsAssetsLocations(assetBuilder); err != nil {
return err
}
}
checkExisting := true
region := ""
project := ""
var sshPublicKeys [][]byte
{
keys, err := sshCredentialStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
if err != nil {
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
}
for _, k := range keys {
sshPublicKeys = append(sshPublicKeys, []byte(k.Spec.PublicKey))
}
}
modelContext := &model.KopsModelContext{
IAMModelContext: iam.IAMModelContext{Cluster: cluster},
InstanceGroups: c.InstanceGroups,
}
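// Per-cloud-provider setup: enforce alpha feature flags where required, record the
// region (and GCE project), and validate the SSH public key requirements.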
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
{
gceCloud := cloud.(gce.GCECloud)
region = gceCloud.Region()
project = gceCloud.Project()
if !AlphaAllowGCE.Enabled() {
return fmt.Errorf("GCE support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowGCE")
}
modelContext.SSHPublicKeys = sshPublicKeys
}
case kops.CloudProviderDO:
{
if !AlphaAllowDO.Enabled() {
return fmt.Errorf("DigitalOcean support is currently (very) alpha and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowDO to enable it")
}
if len(sshPublicKeys) == 0 && (c.Cluster.Spec.SSHKeyName == nil || *c.Cluster.Spec.SSHKeyName == "") {
return fmt.Errorf("SSH public key must be specified when running with DigitalOcean (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
region = awsCloud.Region()
if len(sshPublicKeys) == 0 && c.Cluster.Spec.SSHKeyName == nil {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) > 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderALI:
{
if !AlphaAllowALI.Enabled() {
return fmt.Errorf("aliyun support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowALI")
}
aliCloud := cloud.(aliup.ALICloud)
region = aliCloud.Region()
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with ALICloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderOpenstack:
{
osCloud := cloud.(openstack.OpenstackCloud)
region = osCloud.Region()
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
modelContext.SSHPublicKeys = sshPublicKeys
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with Openstack; please delete a key using `kops delete secret`")
}
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
modelContext.Region = region
if dns.IsGossipHostname(cluster.ObjectMeta.Name) {
klog.Infof("Gossip DNS: skipping DNS validation")
} else {
err = validateDNS(cluster, cloud)
if err != nil {
return err
}
}
tf := &TemplateFunctions{
KopsModelContext: *modelContext,
}
{
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
l.Builders = append(l.Builders,
&BootstrapChannelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
assetBuilder: assetBuilder,
templates: templates,
},
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&kubeapiserver.KubeApiserverBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: &clusterLifecycle,
},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.BastionModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle, SecurityLifecycle: &securityLifecycle},
&model.DNSModelBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&model.ExternalAccessModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.FirewallModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
&model.SSHKeyModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
l.Builders = append(l.Builders,
&model.NetworkModelBuilder{KopsModelContext: modelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&model.IAMModelBuilder{KopsModelContext: modelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&domodel.APILoadBalancerModelBuilder{DOModelContext: doModelContext, Lifecycle: &securityLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: &networkLifecycle},
)
l.Builders = append(l.Builders,
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: &storageACLLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: &clusterLifecycle},
)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: &clusterLifecycle},
// &openstackmodel.APILBModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &clusterLifecycle},
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: &securityLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
}
l.TemplateFunctions["CA"] = func() fi.CAStore {
return keyStore
}
l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
return secretStore
}
configBuilder, err := c.newNodeUpConfigBuilder(assetBuilder)
if err != nil {
return err
}
bootstrapScriptBuilder := &model.BootstrapScriptBuilder{
NodeUpConfigBuilder: configBuilder,
NodeUpSource: c.NodeUpSource,
NodeUpSourceHash: c.NodeUpHash,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
{
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
awsModelBuilder := &awsmodel.AutoscalingGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
}
if featureflag.Spotinst.Enabled() {
l.Builders = append(l.Builders, &spotinstmodel.InstanceGroupModelBuilder{
KopsModelContext: modelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
SecurityLifecycle: &securityLifecycle,
})
if featureflag.SpotinstHybrid.Enabled() {
l.Builders = append(l.Builders, awsModelBuilder)
}
} else {
l.Builders = append(l.Builders, awsModelBuilder)
}
}
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &domodel.DropletBuilder{
DOModelContext: doModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
case kops.CloudProviderGCE:
{
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &gcemodel.AutoscalingGroupModelBuilder{
GCEModelContext: gceModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderALI:
{
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &alimodel.ScalingGroupModelBuilder{
ALIModelContext: aliModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
}
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &openstackmodel.ServerGroupModelBuilder{
OpenstackModelContext: openstackModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: &clusterLifecycle,
})
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
l.TemplateFunctions["Masters"] = tf.MasterInstanceGroups
err = tf.AddTo(l.TemplateFunctions, secretStore)
if err != nil {
return err
}
taskMap, err := l.BuildTasks(modelStore, assetBuilder, &stageAssetsLifecycle, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}
c.TaskMap = taskMap
var target fi.Target
dryRun := false
shouldPrecreateDNS := true
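// Build the fi.Target for the requested output: direct cloud API calls, a terraform or
// cloudformation directory, or a dry-run printer. DNS pre-creation is skipped for targets
// that manage records themselves or make no changes.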
switch c.TargetName {
case TargetDirect:
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
target = gce.NewGCEAPITarget(cloud.(gce.GCECloud))
case kops.CloudProviderAWS:
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case kops.CloudProviderDO:
target = do.NewDOAPITarget(cloud.(*digitalocean.Cloud))
case kops.CloudProviderOpenstack:
target = openstack.NewOpenstackAPITarget(cloud.(openstack.OpenstackCloud))
case kops.CloudProviderALI:
target = aliup.NewALIAPITarget(cloud.(aliup.ALICloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
tfVersion := terraform.Version011
if featureflag.Terraform012.Enabled() {
tfVersion = terraform.Version012
}
tf := terraform.NewTerraformTarget(cloud, region, project, outDir, tfVersion, cluster.Spec.Target)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraform.LiteralFromStringValue(region)); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraform.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraform.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false
case TargetCloudformation:
checkExisting = false
outDir := c.OutDir
target = cloudformation.NewCloudformationTarget(cloud, region, project, outDir)
// Can cause conflicts with cloudformation management
shouldPrecreateDNS = false
case TargetDryRun:
target = fi.NewDryRunTarget(assetBuilder, os.Stdout)
dryRun = true
// Avoid making changes on a dry-run
shouldPrecreateDNS = false
default:
return fmt.Errorf("unsupported target type %q", c.TargetName)
}
c.Target = target
if !dryRun {
acl, err := acls.GetACL(configBase, cluster)
if err != nil {
return err
}
err = configBase.Join(registry.PathKopsVersionUpdated).WriteFile(bytes.NewReader([]byte(kopsbase.Version)), acl)
if err != nil {
return fmt.Errorf("error writing kops version: %v", err)
}
err = registry.WriteConfigDeprecated(cluster, configBase.Join(registry.PathClusterCompleted), c.Cluster)
if err != nil {
return fmt.Errorf("error writing completed cluster spec: %v", err)
}
vfsMirror := vfsclientset.NewInstanceGroupMirror(cluster, configBase)
for _, g := range c.InstanceGroups {
// TODO: We need to update the mirror (below), but do we need to update the primary?
_, err := c.Clientset.InstanceGroupsFor(c.Cluster).Update(ctx, g, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("error writing InstanceGroup %q to registry: %v", g.ObjectMeta.Name, err)
}
// TODO: Don't write if vfsMirror == c.ClientSet
if err := vfsMirror.WriteMirror(g); err != nil {
return fmt.Errorf("error writing instance group spec to mirror: %v", err)
}
}
}
context, err := fi.NewContext(target, cluster, cloud, keyStore, secretStore, configBase, checkExisting, taskMap)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
var options fi.RunTasksOptions
if c.RunTasksOptions != nil {
options = *c.RunTasksOptions
} else {
options.InitDefaults()
}
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
if dns.IsGossipHostname(cluster.Name) {
shouldPrecreateDNS = false
}
if shouldPrecreateDNS {
if err := precreateDNS(ctx, cluster, cloud); err != nil {
klog.Warningf("unable to pre-create DNS records - cluster startup may be slower: %v", err)
}
}
err = target.Finish(taskMap) // This will finish the apply and print the changes
if err != nil {
return fmt.Errorf("error closing target: %v", err)
}
return nil
}
// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, assetBuilder)
if err != nil {
return err
}
c.Cluster = fullCluster
for i, g := range c.InstanceGroups {
fullGroup, err := PopulateInstanceGroupSpec(fullCluster, g, c.channel)
if err != nil {
return err
}
c.InstanceGroups[i] = fullGroup
}
return nil
}
// validateKopsVersion ensures that kops meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKopsVersion() error {
kopsVersion, err := semver.ParseTolerant(kopsbase.Version)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsbase.Version)
// Not a hard-error
return nil
}
if c.channel == nil {
klog.Warning("channel unavailable, skipping version validation")
return nil
}
versionInfo := kops.FindKopsVersionSpec(c.channel.Spec.KopsVersions, kopsVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kops version %q in channel", kopsVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kops version %q in channel", kopsVersion)
}
required, err := versionInfo.IsUpgradeRequired(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kops version %q in channel", kopsVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kops version is available: %s", recommended)
fmt.Printf("\n")
fmt.Printf("Upgrading is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("a new kops version is available: %s\n", recommended)
}
fmt.Println("")
fmt.Printf("This version of kops (%s) is no longer supported; upgrading is required\n", kopsbase.Version)
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Println("")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
return nil
}
// validateKubernetesVersion ensures that kubernetes meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKubernetesVersion() error {
parsed, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
klog.Warningf("unable to parse kubernetes version %q", c.Cluster.Spec.KubernetesVersion)
// Not a hard-error
return nil
}
kopsVersion, err := semver.Parse(kopsbase.KOPS_RELEASE_VERSION)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsVersion)
} else {
tooNewVersion := kopsVersion
tooNewVersion.Minor++
tooNewVersion.Pre = nil
tooNewVersion.Build = nil
if util.IsKubernetesGTE(tooNewVersion.String(), *parsed) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is not yet supported; upgrading kops is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_TOO_NEW_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if os.Getenv("KOPS_RUN_TOO_NEW_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
}
if !util.IsKubernetesGTE(OldestSupportedKubernetesVersion, *parsed) {
fmt.Printf("This version of Kubernetes is no longer supported; upgrading Kubernetes is required\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kubernetes upgrade is required")
}
if !util.IsKubernetesGTE(OldestRecommendedKubernetesVersion, *parsed) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("Kops support for this Kubernetes version is deprecated and will be removed in a future release.\n")
fmt.Printf("\n")
fmt.Printf("Upgrading Kubernetes is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
// TODO: make util.ParseKubernetesVersion not return a pointer
kubernetesVersion := *parsed
if c.channel == nil {
klog.Warning("unable to load channel, skipping kubernetes version recommendation/requirements checks")
return nil
}
versionInfo := kops.FindKubernetesVersionSpec(c.channel.Spec.KubernetesVersions, kubernetesVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kubernetes version %q in channel", kubernetesVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kubernetes version %q in channel", kubernetesVersion)
}
required, err := versionInfo.IsUpgradeRequired(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kubernetes version %q in channel", kubernetesVersion)
}
if recommended != nil && !required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
fmt.Printf("Upgrading is recommended (try kops upgrade cluster)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is no longer supported; upgrading is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kubernetes upgrade is required")
}
}
return nil
}
// addFileAssets adds the file assets within the assetBuilder
func (c *ApplyClusterCmd) addFileAssets(assetBuilder *assets.AssetBuilder) error {
var baseURL string
if components.IsBaseURL(c.Cluster.Spec.KubernetesVersion) {
baseURL = c.Cluster.Spec.KubernetesVersion
} else {
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + c.Cluster.Spec.KubernetesVersion
}
c.Assets = make(map[architectures.Architecture][]*MirroredAsset)
c.NodeUpSource = make(map[architectures.Architecture]string)
c.NodeUpHash = make(map[architectures.Architecture]string)
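// For each supported architecture, resolve (and remap through the asset builder) the
// kubelet, kubectl and optional mounter binaries, the CNI plugins, any Lyft VPC assets,
// the nodeup binary, and the protokube image.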
for _, arch := range architectures.GetSupported() {
c.Assets[arch] = []*MirroredAsset{}
c.NodeUpSource[arch] = ""
c.NodeUpHash[arch] = ""
k8sAssetsNames := []string{
fmt.Sprintf("/bin/linux/%s/kubelet", arch),
fmt.Sprintf("/bin/linux/%s/kubectl", arch),
}
if needsMounterAsset(c.Cluster, c.InstanceGroups) {
k8sAssetsNames = append(k8sAssetsNames, fmt.Sprintf("/bin/linux/%s/mounter", arch))
}
for _, an := range k8sAssetsNames {
k, err := url.Parse(baseURL)
if err != nil {
return err
}
k.Path = path.Join(k.Path, an)
u, hash, err := assetBuilder.RemapFileAndSHA(k)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(u, hash))
}
cniAsset, cniAssetHash, err := findCNIAssets(c.Cluster, assetBuilder, arch)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(cniAsset, cniAssetHash))
if c.Cluster.Spec.Networking.LyftVPC != nil {
lyftAsset, lyftAssetHash, err := findLyftVPCAssets(c.Cluster, assetBuilder, arch)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], BuildMirroredAsset(lyftAsset, lyftAssetHash))
}
asset, err := NodeUpAsset(assetBuilder, arch)
if err != nil {
return err
}
c.NodeUpSource[arch] = strings.Join(asset.Locations, ",")
c.NodeUpHash[arch] = asset.Hash.Hex()
// TODO: Update Kops version in integration tests to 1.19.0 after it is released
// Integration tests fake the Kops version to 1.19.0-alpha.1 and will not be able to find Protokube
if kopsbase.Version != "1.19.0-alpha.1" {
// Explicitly add the protokube image,
// otherwise when the Target is DryRun this asset is not added
// Is there a better way to call this?
_, _, err = ProtokubeImageSource(assetBuilder, arch)
if err != nil {
return err
}
}
}
return nil
}

// buildPermalink returns a link to our "permalink docs", to further explain an error message
func buildPermalink(key, anchor string) string {
	url := "https://github.com/kubernetes/kops/blob/master/permalinks/" + key + ".md"
	if anchor != "" {
		url += "#" + anchor
	}
	return url
}
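
// ChannelForCluster returns the channel for the given cluster, defaulting to
// kops.DefaultChannel when the cluster spec does not set one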
func ChannelForCluster(c *kops.Cluster) (*kops.Channel, error) {
	channelLocation := c.Spec.Channel
	if channelLocation == "" {
		channelLocation = kops.DefaultChannel
	}
	return kops.LoadChannel(channelLocation)
}

// needsMounterAsset checks if we need the mounter program
// This is only needed currently on ContainerOS i.e. GCE, but we don't have a nice way to detect it yet
func needsMounterAsset(c *kops.Cluster, instanceGroups []*kops.InstanceGroup) bool {
	// TODO: Do real detection of ContainerOS (but this has to work with image names, and maybe even forked images)
	switch kops.CloudProviderID(c.Spec.CloudProvider) {
	case kops.CloudProviderGCE:
		return true
	default:
		return false
	}
}
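
// nodeUpConfigBuilder carries the precomputed inputs (assets, addon channels, per-role
// images, protokube image, and etcd manifests) used to build NodeUp configs per instance group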
type nodeUpConfigBuilder struct {
	*ApplyClusterCmd
	assetBuilder   *assets.AssetBuilder
	channels       []string
	configBase     vfs.Path
	cluster        *kops.Cluster
	etcdManifests  map[kops.InstanceGroupRole][]string
	images         map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image
	protokubeImage map[kops.InstanceGroupRole]map[architectures.Architecture]*nodeup.Image
}
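
// newNodeUpConfigBuilder precomputes the per-role and per-architecture images, addon
// channels, and etcd manifests, and returns a model.NodeUpConfigBuilder backed by them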
func (c *ApplyClusterCmd) newNodeUpConfigBuilder(assetBuilder *assets.AssetBuilder) (model.NodeUpConfigBuilder, error) {
	cluster := c.Cluster

	configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
	if err != nil {
		return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
	}

	channels := []string{
		configBase.Join("addons", "bootstrap-channel.yaml").Path(),
	}
	for i := range cluster.Spec.Addons {
		channels = append(channels, cluster.Spec.Addons[i].Manifest)
	}

	useGossip := dns.IsGossipHostname(cluster.Spec.MasterInternalName)

	etcdManifests := map[kops.InstanceGroupRole][]string{}
	images := map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image{}
	protokubeImage := map[kops.InstanceGroupRole]map[architectures.Architecture]*nodeup.Image{}

	for _, role := range kops.AllInstanceGroupRoles {
		isMaster := role == kops.InstanceGroupRoleMaster

		images[role] = make(map[architectures.Architecture][]*nodeup.Image)
		if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
			// When using a custom version, we want to preload the images over http
			components := []string{"kube-proxy"}
			if isMaster {
				components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
			}

			for _, arch := range architectures.GetSupported() {
				for _, component := range components {
					baseURL, err := url.Parse(cluster.Spec.KubernetesVersion)
					if err != nil {
						return nil, err
					}

					baseURL.Path = path.Join(baseURL.Path, "/bin/linux", string(arch), component+".tar")

					u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
					if err != nil {
						return nil, err
					}

					image := &nodeup.Image{
						Sources: []string{u.String()},
						Hash:    hash.Hex(),
					}
					images[role][arch] = append(images[role][arch], image)
				}
			}
		}

		// `docker load` our images when using a KOPS_BASE_URL, so we
		// don't need to push/pull from a registry
		if os.Getenv("KOPS_BASE_URL") != "" && isMaster {
			for _, arch := range architectures.GetSupported() {
				for _, name := range []string{"kops-controller", "dns-controller", "kube-apiserver-healthcheck"} {
					baseURL, err := url.Parse(os.Getenv("KOPS_BASE_URL"))
					if err != nil {
						return nil, err
					}

					baseURL.Path = path.Join(baseURL.Path, "/images/"+name+"-"+string(arch)+".tar.gz")

					u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
					if err != nil {
						return nil, err
					}

					image := &nodeup.Image{
						Sources: []string{u.String()},
						Hash:    hash.Hex(),
					}
					images[role][arch] = append(images[role][arch], image)
				}
			}
		}
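
		// Protokube runs on masters, and on every node when gossip DNS is in use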
		if isMaster || useGossip {
			// TODO: Update Kops version in integration tests to 1.19.0 after it is released
			// Integration tests fake the Kops version to 1.19.0-alpha.1 and will not be able to find Protokube
			if kopsbase.Version != "1.19.0-alpha.1" {
				protokubeImage[role] = make(map[architectures.Architecture]*nodeup.Image)
				for _, arch := range architectures.GetSupported() {
					u, hash, err := ProtokubeImageSource(assetBuilder, arch)
					if err != nil {
						return nil, err
					}
					asset := BuildMirroredAsset(u, hash)
					protokubeImage[role][arch] = &nodeup.Image{
						Name:    kopsbase.DefaultProtokubeImageName(),
						Sources: asset.Locations,
						Hash:    asset.Hash.Hex(),
					}
				}
			}
		}

		if role == kops.InstanceGroupRoleMaster {
			for _, etcdCluster := range cluster.Spec.EtcdClusters {
				if etcdCluster.Provider == kops.EtcdProviderTypeManager {
					p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
					etcdManifests[role] = append(etcdManifests[role], p)
				}
			}
		}
	}

	configBuilder := nodeUpConfigBuilder{
		ApplyClusterCmd: c,
		assetBuilder:    assetBuilder,
		channels:        channels,
		configBase:      configBase,
		cluster:         cluster,
		etcdManifests:   etcdManifests,
		images:          images,
		protokubeImage:  protokubeImage,
	}
	return &configBuilder, nil
}

// BuildConfig returns the NodeUp config for the given instance group
func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, apiserverAdditionalIPs []string) (*nodeup.Config, error) {
	cluster := n.cluster

	if ig == nil {
		return nil, fmt.Errorf("instanceGroup cannot be nil")
	}

	role := ig.Spec.Role
	if role == "" {
		return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
	}

	config := nodeup.NewConfig(cluster, ig)
	config.Assets = make(map[architectures.Architecture][]string)
	for _, arch := range architectures.GetSupported() {
		config.Assets[arch] = []string{}
		for _, a := range n.Assets[arch] {
			config.Assets[arch] = append(config.Assets[arch], a.CompactString())
		}
	}
	config.ClusterName = cluster.ObjectMeta.Name
	config.ConfigBase = fi.String(n.configBase.Path())
	config.InstanceGroupName = ig.ObjectMeta.Name

	if role == kops.InstanceGroupRoleMaster {
		config.ApiserverAdditionalIPs = apiserverAdditionalIPs
	}
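
	// Only include static manifests whose roles match this instance group's role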
	for _, manifest := range n.assetBuilder.StaticManifests {
		match := false
		for _, r := range manifest.Roles {
			if r == role {
				match = true
			}
		}

		if !match {
			continue
		}

		config.StaticManifests = append(config.StaticManifests, &nodeup.StaticManifest{
			Key:  manifest.Key,
			Path: manifest.Path,
		})
	}

	config.Images = n.images[role]
	config.Channels = n.channels
	config.EtcdManifests = n.etcdManifests[role]
	config.ProtokubeImage = n.protokubeImage[role]

	return config, nil
}

environment: ["\"KOPS_RUN_OBSOLETE_VERSION\"", "\"KOPS_RUN_TOO_NEW_VERSION\"", "\"KOPS_RUN_OBSOLETE_VERSION\"", "\"KOPS_BASE_URL\"", "\"KOPS_BASE_URL\""]
variablearg: []
constarg: ["KOPS_BASE_URL", "KOPS_RUN_TOO_NEW_VERSION", "KOPS_RUN_OBSOLETE_VERSION"]
variableargjson: []
constargjson: ["KOPS_BASE_URL", "KOPS_RUN_TOO_NEW_VERSION", "KOPS_RUN_OBSOLETE_VERSION"]
lang: go
constargcount: 3
variableargcount: 0