lang | seed
---|---|
shell | #!/bin/bash
mkdir -p $GOPATH/src/github.com/rbastic/photosrv/storage
cp storage/storage.go $GOPATH/src/github.com/rbastic/photosrv/storage
|
shell | echo " [PlotPES]: COARSEAIR_WORKING_DIR = "${COARSEAIR_WORKING_DIR}
echo " [PlotPES]: COARSEAIR_OUTPUT_DIR = "${COARSEAIR_OUTPUT_DIR}
echo " [PlotPES]: COARSEAIR_INPUT_DIR = "${COARSEAIR_INPUT_DIR}
echo " [PlotPES]"
echo " [PlotPES]: Computing and Plotting Potential Energy Surface. Command: ${PlotPESCommand}"
cd ${COARSEAIR_OUTPUT_DIR}
eval ${PlotPESCommand} ${COARSEAIR_OUTPUT_DIR}
cd ..
endTot=`date +%s`
Totruntime=$((endTot-startTot)) |
shell | git config --global user.email "<EMAIL>"
git config --global user.name "<NAME>"
npm install --global @angular/cli
ng new newproject --routing
ng add @angular/material |
shell | #!/bin/sh
set -e
# first arg is `-*` (picks up `--*`, too)
if [ "${1#-}" != "$1" ]; then
set -- /liquibase/liquibase "$@"
fi
exec "$@" |
shell | cd ~/git/llvm
git pull
git checkout cling-patches
cd ~/git/tools/cling
git pull
cd ~/git/tools/clang
git pull
git checkout cling-patches
cd ~/git/llvm_cling_build
|
shell | done
for hash in "${TORRENT_HASHES[@]}"
do
for port in "${PORTS[@]}"
do
if ! transmission-remote localhost:"$port" -t "$hash" -r > /dev/null 2>&1; then
response_error "Could not remove torrents"
fi
done |
shell | eval export ${i}_FLAGS+=\" --coverage\"
fi
done
echo "C_FLAGS = $C_FLAGS"
echo "CXX_FLAGS = $CXX_FLAGS"
# shellcheck disable=2154
echo "Fortran_FLAGS = $Fortran_FLAGS"
echo "CUDA_FLAGS = $CUDA_FLAGS"
# Tweak MPI settings
# export OMPI_MCA_btl=self,sm
export OMPI_MCA_btl=^openib
#--------------------------------------------------------------------------------------------------#
# Draco |
shell | #!/bin/bash
docker stop circonus-irondb-datasource |
shell | #!/bin/bash
python merge_and_convert_to_html.py filelist.txt 1:205780000-205940000 spirometry_in_ukbb
|
shell | #!/bin/bash
CFG=$1
./hal/hal -l0 -f hal.out ./hal/$CFG &
|
shell | pandoc --version &> /dev/null \
|| { echo "Pandoc must be installed to run tests"; exit 0; }
pandoc -o test-default.docx -F ../bin/docx-title.js test.md \
|| { echo "FAIL: default template"; exit 1; }
pandoc -o test-custom.docx -F ../bin/docx-title.js -M docx-title=./title.xml test.md \ |
shell | ${PRIN} " %b %s ... \n" "${INFO}" "Updating repo"
yum update -y || error "Update failed!"
${PRIN} " %b %s " "${INFO}" "Update repo"
${PRIN} "%b" "${DONE}"
${SLP}
${PRIN} " %b\\n" "${TICK}"
# Upgrade
${PRIN} " %b %s ... \n" "${INFO}" "Upgrading packages"
yum upgrade -y || error "Upgrade failed!"
${PRIN} " %b %s " "${INFO}" "Upgrade packages"
${PRIN} "%b" "${DONE}"
${SLP}
${PRIN} " %b\\n" "${TICK}"
# Install |
shell | lib/c++
{% endblock %}
{% block std_box %}
bld/bootbox
{% endblock %}
|
shell |
echo "Enter the source image"
read -p 'Input Path: ' sourceImage
echo "Enter the output file path"
read -p 'Output Path: ' outputImage
echo 'Enter any effects! Start each effect with "--". Can also leave blank!'
read -p 'Effects: ' effects |
shell | #!/bin/bash
python train_linear_nongau_ts.py -e toy_linear_ts
python train_linear_nongau_ts.py -e toy_linear_ts
python train_linear_nongau_ts.py -e toy_linear_ts |
shell | ldapwhoami -Q | cut -d ":" -f2 | cut -d "," -f1,3,4,5,6
|
shell | #!/bin/bash
dotnet.exe ef database update
|
shell |
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-r|--root)
root="$2"
shift # past argument
;;
-e|--reports)
reports="$2"
shift # past argument |
shell | #!/bin/sh
apt-get update
apt-get upgrade
|
shell | fi
PID2=`ps -eaf | grep 'space.syncer/app/spacesyncer' | grep -v grep | awk '{print $2}'`
if [[ "" != "$PID2" ]]; then
echo "[s p a c e ] killing space.syncer PID = $PID2"
kill -9 $PID2
fi
. ~/.bash_profile
#nohup npm start > /dev/null 2>&1 &kill |
shell | read ip
export SERVER_IP=$ip
echo "Введите REPO_NAME"
read repo_name
export REPO_NAME=$repo_name
export DEPLOY_USER=deploy
bundle exec cap production deploy:initial
|
shell | proxy
server
EOL
return 0
}
function get_version()
{
local os_version=""
if [ -f /etc/os-release ]; then
. /etc/os-release
os_version="${VERSION_ID%%.*}"
elif [ -f /etc/redhat-release ]; then |
shell |
local -r DirHere=$PWD
local -r DirSrc=$BO_Home/sample_project/BriteOnyx
local -r DirTgt=$DirHere/BriteOnyx
echo "Updating/installing BriteOnyx content here in directory '$DirHere', from directory '$BO_Home'"
[[ -d "$DirTgt" ]] && rm -r $DirTgt
cp -fp $BO_Home/sample_project/activate.src $DirHere/activate.src
# Handle new content |
shell | #!/usr/bin/env bash
CURDIR=$(readlink -f "$(dirname "$0")")
source ${CURDIR}/test_func.sh
VERSION=${1:-0.23.0}
download_enmasse_release ${VERSION}
|
shell | git checkout release-3.7-contrail || git checkout origin/release-3.7-contrail
popd
rm -rf openshift-ansible3.9
cp -R openshift-ansible openshift-ansible3.9
pushd openshift-ansible3.9
git checkout release-3.9-contrail || git checkout origin/release-3.9-contrail
popd
|
shell |
# create an azure container registry for images, admin enabled for docker login
az acr create --subscription $SUBID --resource-group $GROUPNAME --name $ACRNAME --sku Standard --admin-enabled true
# creating RBAC for Azure AD to permit build and pull from acr to aks cluster |
shell |
ARG
<label-name> Node label value for kubernetes.io/arch=amd64,kubernetes.io/hostname
EOF
}
if [ -z "$1" ]; then
usage
exit 1
fi
|
shell |
node=box parameters=0,0,0,1000 boundary=$(tboolean-container)
node=$operation parameters=0,0,0,300 boundary=$(tboolean-testobject)
node=box parameters=0,0,0,$inscribe boundary=$(tboolean-testobject)
node=sphere parameters=0,0,0,200 boundary=$(tboolean-testobject) |
shell | if [ ! $ip ]
then
read -p "Puppet master fqdn : " fqdn
fi
timedatectl set-timezone Asia/Kolkata
yum -y install ntp net-tools
ntpdate pool.ntp.org
systemctl restart ntpd
systemctl enable ntpd
rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-pc1-el-7.noarch.rpm
yum -y install puppet-agent
ln -s /opt/puppetlabs/bin/puppet /bin/puppet
if [ $ip ]
then |
shell | fssize=$(echo $v_secondary_partition | tr -d '[:alpha:]')
case $unit in
K) unit=KB ;;
M) unit=MB ;;
G) unit=GB ;;
T) unit=TB ;;
esac
# Check if there is enough space left at end of the disk
disk_free=$(parted --machine --script $v_disk unit ${unit} print free | grep ":free" | tail -n 1 | cut -d ':' -f 4 | tr -d '[:alpha:]')
if [ $((disk_free-fssize)) -lt 0 ]; then
echo "Not enough free space left on disk for a $v_secondary_partition partition. Maximum space left is ${disk_free}${unit}"
exit 1
fi |
shell |
BACKUP_NAME="BACKUP-$(date +%F-%I-%M-%p).7z"
SERVER_DIR="minecraft.jmoore.dev.paper"
echo Backing up Java server...
7za a -mmt -mx9 -t7z $BACKUP_NAME $SERVER_DIR
|
shell | #!/bin/bash
# Set up the appropriate rustc toolchain
cd $(dirname $0) |
shell | #!/usr/bin/env bash
if [ "$TRAVIS_BRANCH" = 'master' ] && [ "$TRAVIS_PULL_REQUEST" == 'false' ]; then
mvn -P travis-maven-central --settings ci/settings.xml ;
mvn -P travis-docker-hub ;
fi |
shell | GDAL_NAME="bundled"
fi
case ${1} in
DEV)
echo -e "${SEP}Testing current version against ${GDAL_NAME} GDAL${SEP}"
echo npx @mapbox/node-pre-gyp configure ${GDAL_OPT}
npx @mapbox/node-pre-gyp configure ${GDAL_OPT}
npx @mapbox/node-pre-gyp build -j max
npm test
R=$?
;;
RELEASE) |
shell | # The following line creates it in the root folder of the current project.
INSTALL_DIR=Products/${FMK_NAME}.framework
# Working dir will be deleted after the framework creation.
WRK_DIR=build
DEVICE_DIR=${WRK_DIR}/Release-iphoneos/${FMK_NAME}.framework
SIMULATOR_DIR=${WRK_DIR}/Release-iphonesimulator/${FMK_NAME}.framework
# Build via the project:
# xcodebuild -configuration "Release" -target "${FMK_NAME}" -sdk iphoneos clean build
# xcodebuild -configuration "Release" -target "${FMK_NAME}" -sdk iphonesimulator clean build
# Build via the workspace:
xcodebuild -workspace ${WORKSPACE_NAME}".xcworkspace" -configuration "Release" -scheme ${SCHEME_NAME} SYMROOT=$(pwd)/build -sdk iphoneos clean build
xcodebuild -workspace ${WORKSPACE_NAME}".xcworkspace" -configuration "Release" -scheme ${SCHEME_NAME} SYMROOT=$(pwd)/build -sdk iphonesimulator clean build
# Cleaning the oldest.
if [ -d "${INSTALL_DIR}" ]
then |
shell |
echo "Format Zookeeper for Fast failover.."
$HADOOP_PREFIX/bin/hdfs zkfc -formatZK
fi
$HADOOP_PREFIX/sbin/hadoop-daemon.sh start zkfc
$HADOOP_PREFIX/bin/hdfs namenode
;;
standby)
if [[ ! -a /data/hdfs/nn/current/VERSION ]]; then
echo "Bootstrap Standby Namenode.."
$HADOOP_PREFIX/bin/hdfs namenode -bootstrapStandby
fi
$HADOOP_PREFIX/sbin/hadoop-daemon.sh start zkfc
$HADOOP_PREFIX/bin/hdfs namenode |
shell | webexmeetings)
# credit: <NAME> (@erikstam)
name="Cisco Webex Meetings"
type="pkgInDmg"
downloadURL="https://akamaicdn.webex.com/client/webexapp.dmg"
expectedTeamID="DE8Y96K9QP"
targetDir="/Applications"
#blockingProcessesMaxCPU="5"
blockingProcesses=( Webex )
;;
|
shell | export HISTORY=$(opsbro compliance history)
check_history_entry() {
ENTRY="$1"
echo "$HISTORY" | grep -- "$ENTRY" |
shell | fi
sleep 5
done
|
shell | cd ../alumni/_posts
wget "https://docs.google.com/spreadsheets/d/1UJ8eHwcBsdKRjwc6uu2KVhNIn_FSEpLMh6xqMNQ5sUY/pub?gid=2103651667&single=true&output=csv" -O alumni.csv
mkdir -p old
mv *.md old
git pull
python ../../bin/create_alumni.py
git add *.md
git commit -a -m "update alumni"
git push |
shell |
seriationct-create-networkmodel.py --graphshow 0 --x 75 --y 75 --slices 5 --model grid --wiring hierarchy --children 4 --levels 3 --overlap 0.2 --density 0.3 --filename rawnetworkmodels/seriationct-5-networkmodel/seriationct-5 --child_interconnect_percentage 1.0 --gchild_interconnect_percentage 1.0 --child_interconnect_weight 0.01 --gchild_interconnect_weight 1.0 --root_child_weight 0.01 --child_gchild_weight 0.01
cp bin/build-networkmodel.sh rawnetworkmodels/seriationct-5-networkmodel
cd rawnetworkmodels
zip -r seriationct-5-network-gml.zip seriationct-5-networkmodel/*.gml
zip -r seriationct-5-full-network.zip seriationct-5-networkmodel
cp seriationct-5-networkmodel/seriationct-5XY.txt ../xyfiles
mv seriationct-5-network-gml.zip ../networks
|
shell | Deployment prerequisites:
1. The ceph image is already present on the controller host.
2. /etc/hosts and /etc/ceph/hosts are configured on every node.
3. All hosts can SSH to one another without passwords.
Deployment steps (a combined sketch follows this list):
1. Run the setup-mon script, e.g.: ./setup-mon --public_network=1.2.3.0/24 --cluster_network=1.2.3.0/24 --hosts=controller,compute1,compute2
2. Run the setup-cephlic script, e.g.: ./setup-cephlic (run before deploying the OSD service)
3. Run the setup-osd script, e.g.: ./setup-osd --host=controller --disks=/dev/sdb,/dev/sdc  Note: each run deploys a single host; the script may be run repeatedly.
4. Run the check-ceph script, e.g.: ./check-ceph to confirm the state of the ceph cluster.
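A minimal combined sketch of the four steps, assuming the setup-* and check-ceph scripts sit in the current directory; the networks, host names, and disks reuse the illustrative values from the examples above and are placeholders, not a definitive deployment:
# Hedged sketch; adjust networks, hosts, and disks to your environment.
./setup-mon --public_network=1.2.3.0/24 --cluster_network=1.2.3.0/24 --hosts=controller,compute1,compute2
./setup-cephlic                                  # must run before deploying OSDs
for host in controller compute1 compute2; do     # setup-osd deploys one host per run
    ./setup-osd --host=$host --disks=/dev/sdb,/dev/sdc
done
./check-ceph                                     # confirm cluster state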
|
shell | echo "\033[1;34m^^^ $exercise ^^^\033[0m"
else
fails="$fails\n$exercise"
echo "\033[1;31m^^^ $exercise ^^^\033[0m"
fi
fi
done
if [ -n "$fails" ]; then
echo "Failing exercises:$fails" |
shell |
./.build/debug/$var > $var-output.txt
if [ $? != 0 ]; then
if [ $var != "Failing" ]; then
echo " - Integration Failed"
EXIT_CODE=1
fi
elif [ $var = "Failing" ]; then |
shell | #!/bin/bash
cd $(dirname $0)
source env.sh
docker push labteral/stopover-python:$VERSION
|
shell | #!/bin/bash
. path.sh
. cmd.sh
# TODO: Use locking script to obtain GPU
export CUDA_VISIBLE_DEVICES=1
export TF_CPP_MIN_LOG_LEVEL=2
|
shell |
extern int good_echo(void);
int main(void)
{
int ret;
ret = good_echo();
return ret;
}
EOF |
shell | # Show trace if option -o is used
if [[ $1 == "-o" ]]; then
set -o xtrace
fi
# Warns user to run the script as root
if [ "$EUID" -ne 0 ]; then
echo "Please run as root using sudo." |
shell |
# _CHECK_FQDN
source ${0%/*}/../config.sh 2>/dev/null || :
fqdn=${_CHECK_FQDN:-$(hostname -f)}
which host 2>/dev/null >/dev/null && host ${fqdn} || :
ping -c 1 -w 5 ${fqdn}
# vim:sw=2:ts=2:et:
|
shell | cd ../cmd/main
go build -o ../../bin/main
echo "Binary successfully built"
|
shell | then
echo "error when creating table request"
fi
################################################################################
# 4) INSERT DATA INTO IDENTITY TABLE
mysql enthic < ${SQL_DIR}insert-identity.sql "$1";
if [ $? != 0 ]
then
echo "error when filling table identity"
fi
################################################################################
# 5) CREATE A TABLE OF THE BUNDLE INFORMATION
mysql enthic < ${SQL_DIR}create-table-bundle.sql "$1";
if [ $? != 0 ] |
shell | bgacc=$(ls $indir/$expid\_$cline\_peaks-all* | xargs basename | cut -d "." -f 1 | cut -d "_" -f 4 | uniq)
optacc=$(ls $indir/$expid\_$cline\_peaks-optimal* | xargs basename | cut -d "." -f 1 | cut -d "_" -f 4 | uniq)
bgbed=${indir}/${expid}_${cline}_peaks-all_${bgacc}.bed.gz
bgtopbed=${indir}/${expid}_${cline}_peaks-all_${bgacc}.top.bed.gz
optbed=${indir}/${expid}_${cline}_peaks-optimal_${optacc}.bed.gz
ambibed=${outdir}/peaks/${expid}_${cline}_ambig.bed
|
shell | #!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh
SETTINGS="--compile=1 --min_count_to_compile=0 --max_threads=1 --max_memory_usage=8000000 --server_logs_file=/dev/null"
output=$($CLICKHOUSE_CLIENT -q "SELECT length(groupArray(number)) FROM (SELECT * FROM system.numbers LIMIT 1000000)" $SETTINGS 2>&1)
[[ $? -eq 0 ]] && echo "Expected non-zero RC"
if ! echo "$output" | grep -Fc -e 'Memory limit (for query) exceeded' -e 'Cannot compile code' ; then
echo -e 'There is no expected exception "Memory limit (for query) exceeded: would use..." or "Cannot compile code..."' "Whereas got:\n$output"
fi
$CLICKHOUSE_CLIENT -q "SELECT 1"
|
shell | \
--dataset channel \
--dataset-dir=/home/zby/datasets/channel/data_5.19/SNR25.tfrecords \
--input-type full-input \
--num-input-images 1 \
--batch-size 1 \
--scale-factor 2.5 \
\
--measurement-type pilot \
--noise-std 0.1 \
--num-measurements 500 \
\
--z-dim 80 \
--pilot-dim 48 \
--model-types dcgan \ |
shell | # start the required daemons
redis-server /opt/gitstream/redis.conf
nginx
mongod --dbpath /var/opt/gitstream/mongo --fork --syslog
exit 0 # to prevent vagrant up from complaining when these daemons are already running
|
shell |
echo "--------------------------------"
printf "%25s" всего
find $POOLDIR -type f -name "*.cmd" | sed -e 's/[^0-9]//g' | wc -l
printf "%25s" уникальных
find $POOLDIR -type f -name "*.cmd" | sed -e 's/[^0-9]//g' | sort | uniq | wc -l |
shell | # stick a copy in packages' dist/ for tests and local dev
mkdir -p "$PACKAGE_DIRECTORY/dist"
cp "$BUILD_PATH/headless-$BUILD_NAME" "$PACKAGE_DIRECTORY/dist/$CHANNEL-headless-$BUILD_NAME"
# Cleanup
rm -Rf "$BUILD_PATH"
else
echo "$BUILD_NAME version $VERSION was previously package. Skipping."
fi
}
# main script
if [ ! -z "$1" ]; then |
shell | #!/bin/sh
set -e -u -x
sudo sed -i -e "s/^#PACKAGER.*/PACKAGER=\"$PACKAGER_NAME\"/" /etc/abuild.conf
sudo sed -i -e "s/^#MAINTAINER.*/MAINTAINER=\"$PACKAGER_NAME\"/" /etc/abuild.conf
|
shell | #!/bin/bash
set -ex
bash updatePubmedListing.sh
snakemake --cores 1 -T 3
bash downloadCORD19_metadataOnly.sh
|
shell | bower_components/gif.js/dist/gif.worker.js
)
image_files=`find ui icons -name '*.jpg' -o -name '*.jpeg' -o -name '*.gif' -o -name '*.png'`
all_files="${js_files[@]} ${image_files[@]}"
for file in $all_files
do
mkdir -p build/`dirname $file`
cp $file build/$file
done
# Package into a zip.
rm -f ChromeReview.zip
cd build |
shell | #!/bin/bash
npm run build
git add .
git commit -m "auto-commit"
git push origin master
|
shell | pgi="pgi.jar"
totalNumberOfTests=${#courseTestFiles[@]}
i=1
for file in "${courseTestFiles[@]}"
do
# Generate Piglet code |
shell | #!/bin/bash
find ~/.vim/pack/plugins/start/ -maxdepth 1 -type d -exec sh -c '(cd {} && git pull && git submodule update --init --recursive)' ';'
|
shell | #!/bin/bash
cat $0
#Author: <NAME>
#Notes: A cheat |
shell | }
o() {
if [[ -z "$1" ]]; then
# Open the current working directory in a new detached program session.
detach "${BASH_ALIASES[open]:-open}" "${PWD}"
else
detach "${BASH_ALIASES[open]:-open}" "$@"
fi
}
pyformatting() { |
shell | # pip install wheel twine build packaging
rm dist/*
python -m build
twine upload dist/*
|
shell | 3)
sudo apt-get -y update > /dev/null
sudo apt-get install -y sysbench > /dev/null
sudo apt-get install -y fio > /dev/null
;;
esac
echo "----------------- CPU Information ----------------- ---------------------"
lscpu
echo " ------------ Hardware ---------------------------------------------------"
lshw
echo "----------------- CPU Sysbench Test with one thread ---------------------" |
shell | done
# necessary libs
libs="libtps.la"
for lib in $libs; do
if [ ! -s $lib ];then
if [ -s $testDir/$lib ];then
ln -s $testDir/$lib .
fi |
shell | rm -f ~/anaconda.sh
# Python 3
sudo apt-get install -y python3-dev python3-pip python3.8-venv
python3 -m pip install virtualenv
|
shell | PATH=$PATH:$HOME/.rvm/bin
#### Local bin
|
shell | cat /etc/fstab
echo ""
echo "cat /etc/mtab"
echo "----------------------------------------"
cat /etc/mtab
echo ""
echo "lsblk"
echo "----------------------------------------"
lsblk --list --output 'NAME,KNAME,FSTYPE,MOUNTPOINT,LABEL,UUID,PARTTYPE,PARTLABEL,PARTUUID'
echo "" |
shell | _df_log_debug " - $f"
source $f
done
[[ -z "${DOTFILES_CUSTOM_DIR}" ]] && custom_dir="${HOME}/.dotfiles-custom" || custom_dir="${DOTFILES_CUSTOM_DIR}"
if [[ -d $custom_dir ]]; then
_df_log_debug "sourcing custom files"
for f in $custom_dir/*.sh; do |
shell | #!/bin/sh
/bin/modoki-auth
|
shell | run_podman rm $cid
}
@test "podman exec - leak check" {
skip_if_remote
|
shell | CWD=$(pwd)
PACKAGE_MANAGER="apt-get"
SUDO="sudo"
if [ "$(whoami)" == "root" ]; then |
shell | #!/bin/sh
# success
./simple-test.sh `basename $0 .sh` test5 -a asdf -c fdas --eee blah --ddd -j o --jjj t
|
shell | #!/bin/bash
yarn install
yarn run lint
yarn test |
shell | nps="2 4"
probs=("100K_whole_city.problem")
solvers=("pbjacobi" "gamg")
flags=("" "-fieldsplit_camera_pc_type gamg")
|
shell | #!/bin/bash
git clone https://git-wip-us.apache.org/repos/asf/metron.git
cd metron
git checkout apache-metron-0.5.0-rc2
mvn clean package -DskipTests
|
shell | #!/bin/bash
spatialite_osm_net -o slovenia-latest.osm.pbf -d db.sqlite -T roads
|
shell | # done
#
# mv *footprinting_hoco_sort.bed ../merged_into_anchors_hoco
# # Intersect tissue specific files
# cd /Users/mguo123/Google Drive/1_khavari/omics_project-LD/atac_footprinting/merged_into_anchors_hoco
# b. `mv Melanocytes_footprinting_hoco_sort.bed MC_footprinting_hoco_sort.bed`
# c. `mv GDS-D0_footprinting_hoco_sort.bed GDSD0_footprinting_hoco_sort.bed`
# d. `mv GDS-D3_footprinting_hoco_sort.bed GDSD3_footprinting_hoco_sort.bed`
# e. `mv GDS-D6_footprinting_hoco_sort.bed GDSD6_footprinting_hoco_sort.bed`
# f. `####bedtools intersect -wa -wb -a Air_anchors_sort.bed -b Airway_footprinting_sorted.bed -sorted -names footprinting -f 2.5E-6 > Airway_anchors_footprinting.bed` |
shell | #!/bin/sh
./escls-run.sh "echo y | sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install x-pack"
./escls-run.sh "sudo /usr/share/kibana/bin/kibana-plugin install x-pack"
./escls-run.sh "echo 'xpack.security.enabled: false' | sudo tee -a /etc/elasticsearch/elasticsearch.yml"
|
shell | #!/bin/bash
docker login quay.io \
-u ${CENSUSAI_DOCKER_USERNAME} \
-p ${CENSUSAI_DOCKER_PASSWORD}
docker push ${CENSUSAI_IMAGE}
|
shell | exit 1
fi
sleep 2
$NGINX_PATH/sbin/nginx -p $APP_PATH -s reload
if [ "$?" != "0" ]; then |
shell | # Completion script for trt
#
_trt() {
local -a subcmds |
shell | #!/bin/bash
greeting="Hello "
user=$(whoami)
echo "$greeting $user!"
# Hello Incredibles'!
|
shell | if [[ $# -eq 0 ]] ; then
echo 'missing argument: subtitle_filepath'
exit $FAIL
fi
subtitle_filepath="$1" # contains timing as a subtitles file
timing_filepath="${subtitle_filepath/.ass/.tsv}" # timing, as: mot start_time end_time
awk -f <(cat - <<-'EOD'
BEGIN{FS=","} |
shell | echo built $(pwd)
echo "go build static-service-linux-amd64 success!!!"
export GOOS=windows
export GOARCH=amd64
export CGO_ENABLED=0
go build -o build/static-service-windows-amd64 .
echo built $(pwd) |
shell |
echo "cfg path is: $CFG_PATH";
bootstrap_list=/dns4/$NETW/udp/$RPC_PORT/quic/p2p/$PEER_ID;
if [ "$BOOTSTRAP_LIST" != '0' ]
then |
shell | #!/bin/sh
# $Id: $
if [ $# -lt 1 ]; then
echo USAGE: $0 name
exit 1
fi
cd .. && make && make dist && cd - && ./make.sh $1
|
shell | # Enable 8bit
setopt print_eight_bit
# sh_word_split
setopt sh_word_split
# Change
#~$ echo 'hoge' \' 'fuga' |
shell | --port 3456 \
<&- &
pid=$!
cleanup() {
printf >&5 '\n'
if [ "$(ps -q "${pid}" -o ppid=)" != "$$" ]; then
printf >&5 \
'Exiting. Server seems to have stopped (was pid %d).\n' "${pid}"
else
printf >&5 'Exiting. Also stop server (y/N)? '
line=
read -r line || printf >&5 '\n'
if [ "${line-}" = y ] || [ "${line-}" = Y ]; then
kill "${pid}" |
shell | docker run --name mern-websocket-chat-app-1 --network=mern-websocket-chat-network -d mern-websocket-chat-app
docker run --name mern-websocket-chat-app-2 --network=mern-websocket-chat-network -d mern-websocket-chat-app
docker run --name mern-websocket-chat-app-3 --network=mern-websocket-chat-network -d mern-websocket-chat-app
docker build -t mern-websocket-chat-webserver webserver/.
docker run --name mern-websocket-chat-webserver-1 --network=mern-websocket-chat-network -p 80:80 -p 443:443 -d mern-websocket-chat-webserver
docker exec -it mern-websocket-chat-database-1 bash -c "mongoimport -vvv --jsonArray --uri='mongodb://@localhost/mern-websocket-chat' --collection=users --file=/tmp/data/users.json" |
shell | #!/bin/bash
set -eo pipefail
apt-get update
apt-get dist-upgrade -y
echo "Rebooting the machine..."
reboot
sleep 60
|
shell | --cache_dir cache/${MODEL}_${TASK}/ \
--train_examples $NUM_TRAIN \
--test_examples $NUM_TEST \
--unlabeled_examples $NUM_UNLABEL \
--sc_max_steps 5000 \
--pet_max_steps 1000 \
--lm_training \
--pet_per_gpu_train_batch_size 1 \
--pet_per_gpu_unlabeled_batch_size 3 \
--sc_gradient_accumulation_steps 4 \
--pet_gradient_accumulation_steps 4 \
--split_examples_evenly \
--no_distillation \ |
shell | VERSION=1.62.0
FOLDER_NAME=boost_1_62_0
EXTENSION=
if [[ ${OSTYPE} == "msys" ]]; then
EXTENSION=.exe
fi
ORIGIN=$PWD
if [ ! -f "$FOLDER_NAME.tar.gz" ] ; then
wget -nc https://sourceforge.net/projects/boost/files/boost/$VERSION/$FOLDER_NAME.tar.gz || exit 1
fi |
shell |
for fn in ${top_srcdir}/test/datafile_xml.*; do
run_test ./drive_data_scanner -P $fn
on_error_fail_with "$fn does not match"
done
for fn in ${top_srcdir}/test/log-samples/*.txt; do |
shell | function install_supervisor {
easy_install supervisor
rm -rf /home/work/bin/supervisor
mkdir -p /home/work/bin/
wget http://192.168.3.11/download/supervisor/supervisor.tgz || exit
tar xf supervisor.tgz -C /home/work/bin/
local_ip=`cat /etc/sysconfig/network-scripts/ifcfg-eth0 |grep IPADDR |awk -F= '{print $2}'`
cd /home/work/bin/supervisor/ && \
sed -i "s/LOCAL_IP_ADDR/$local_ip/g" supervisord.conf
#supervisord -c supervisord.conf
#echo 'cd /home/work/bin/supervisor/ && supervisord -c supervisord.conf' >> /etc/rc.local
}
#install_jdk |
shell | CODE_WRONG_USAGE=255
CODE_NO_PYTHON=254
# input params
if [ $# -lt 1 ]; then
echo "Runs the given script with the first found Python version"
echo "Usage: $0 <PYTHON_SCRIPT>"
exit $CODE_WRONG_USAGE
fi
SCRIPT="$1" |
shell | sudo apt install -y htop # a beautiful top
sudo apt install -y fortunes cowsay figlet lolcat fortunes-off # to play in the command line
echo "wireshark-common wireshark-common/install-setuid boolean true" | sudo debconf-set-selections
sudo DEBIAN_FRONTEND=noninteractive apt install -y wireshark
# theming |