branch_name (stringclasses, 149 values) | text (stringlengths, 23–89.3M) | directory_id (stringlengths, 40) | languages (listlengths, 1–19) | num_files (int64, 1–11.8k) | repo_language (stringclasses, 38 values) | repo_name (stringlengths, 6–114) | revision_id (stringlengths, 40) | snapshot_id (stringlengths, 40) |
---|---|---|---|---|---|---|---|---|
refs/heads/main
|
<file_sep>package com.khnsoft.follownumber.game.model
data class Pad(val num: Int, val isVisible: Boolean = true)<file_sep>package com.khnsoft.follownumber.game.model
class Game {
private var step = 0
private lateinit var _curPads: Pads
val curPads get() = _curPads
private lateinit var _nextPads: Pads
val nextPads get() = _nextPads
private var _score = 0
val score get() = _score
private var nextNum = 1
init {
generateNextPads()
assignNextPads()
}
private fun generateNextPads() {
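// Each set of pads covers nine consecutive numbers, so the set generated at "step" starts at step * 9 + 1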
_nextPads = Pads(step++ * 9 + 1)
}
private fun assignNextPads() {
_curPads = _nextPads
generateNextPads()
}
fun onPadClick(row: Int, col: Int) {
if (curPads[row][col].num == nextNum) {
curPads.clear(row, col)
_score++
nextNum++
if (nextNum % 9 == 1) {
assignNextPads()
}
} else {
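// Incorrect pad tapped: currently a no-op (score stays the same; no game-over handling here yet)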
}
}
enum class Status {
READY,
RUNNING,
PAUSE,
GAME_OVER
}
}<file_sep>package com.khnsoft.follownumber.game.viewmodel
import androidx.lifecycle.ViewModel
import androidx.lifecycle.ViewModelProvider
import com.khnsoft.follownumber.game.model.Game
class GameViewModelFactory(private val game: Game): ViewModelProvider.Factory {
override fun <T : ViewModel?> create(modelClass: Class<T>): T = GameViewModel(game) as T
}<file_sep>package com.khnsoft.follownumber.game.viewmodel
import androidx.lifecycle.LiveData
import androidx.lifecycle.MutableLiveData
import androidx.lifecycle.ViewModel
import com.khnsoft.follownumber.game.model.Game
import com.khnsoft.follownumber.game.model.Pads
class GameViewModel(val game: Game): ViewModel() {
private val _pads = MutableLiveData(game.curPads)
val pads: LiveData<Pads> get() = _pads
private val _score = MutableLiveData(game.score)
val score: LiveData<Int> get() = _score
fun onPadClick(row: Int, col: Int) {
game.onPadClick(row, col)
_pads.value = game.curPads
_score.value = game.score
}
}<file_sep>package com.khnsoft.follownumber.main.view
import android.content.Intent
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.widget.TextView
import androidx.databinding.DataBindingUtil
import com.khnsoft.follownumber.R
import com.khnsoft.follownumber.databinding.ActivityMainBinding
import com.khnsoft.follownumber.game.view.GameActivity
class MainActivity : AppCompatActivity() {
private lateinit var binding: ActivityMainBinding
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
binding = DataBindingUtil.setContentView(this, R.layout.activity_main)
binding.tvStartGame.setOnClickListener {
startActivity(Intent(this, GameActivity::class.java))
}
}
}<file_sep># Follow-Number
## Design pattern (MVVM)
- Uses the MVVM design pattern with `DataBinding` and `LiveData`
- Practicing how to split up the package structure
## Test-Driven Development (TDD)
- Tried TDD; it worked for simple classes, but testing across the M-V-VM layers still needs more practice
## Current progress
- [x] Tapping a number makes it disappear
- [x] Tapping a multiple of 9 resets the board to the next set
- [x] Score increases by 1 each time the correct number is tapped
<file_sep>package com.khnsoft.follownumber.game.viewmodel
import androidx.arch.core.executor.testing.InstantTaskExecutorRule
import com.khnsoft.follownumber.game.model.Game
import com.khnsoft.follownumber.game.model.Pad
import com.khnsoft.follownumber.game.model.Pads
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import org.mockito.Mock
import org.mockito.Mockito.*
class GameViewModelTest {
private lateinit var viewModel: GameViewModel
@get:Rule
var instantExecutorRule = InstantTaskExecutorRule()
@Mock
private lateinit var game: Game
@Mock
private lateinit var pads: Pads
@Before
fun setup() {
game = mock(Game::class.java)
pads = mock(Pads::class.java)
viewModel = GameViewModel(game)
}
@Test
fun onPadClick_correctPad_doProcess() {
`when`(game.curPads).thenReturn(pads)
`when`(pads.get(0)).thenReturn(listOf(Pad(1)))
viewModel.onPadClick(0, 0)
verify(game).onPadClick(0, 0)
}
}<file_sep>package com.khnsoft.follownumber.game.model
import org.junit.Assert.*
import org.junit.Test
class PadsTest {
@Test
fun init_startsFromOne_containsOneToNine() {
val pads = Pads(1).let {
(0..2).flatMap { row -> (0..2).map { col -> it[row][col] } }
}
for (num in 1..9) {
assertTrue(pads.contains(Pad(num)))
}
}
@Test
fun init_startsFromTen_containsTenToEighteen() {
val pads = Pads(10).let {
(0..2).flatMap { row -> (0..2).map { col -> it[row][col] } }
}
for (num in 10..18) {
assertTrue(pads.contains(Pad(num)))
}
}
@Test
fun init_startsFromOne_ThreeRowsAndThreeCols() {
val pads = Pads(1)
assertEquals(9, pads.size)
for (row in 0..2) {
assertEquals(3, pads[row].size)
}
}
@Test
fun clear_startsFromOneAndClear00_notShowOnly00() {
val pads = Pads(1)
pads.clear(0, 0)
for (row in 0..2) {
for (col in 0..2) {
if (row == 0 && col == 0) assertFalse(pads[row][col].isVisible)
else assertTrue(pads[row][col].isVisible)
}
}
}
@Test
fun clear_startsFromOneAndClear0022_notShowOnly0022() {
val pads = Pads(1)
pads.clear(0, 0)
pads.clear(2, 2)
for (row in 0..2) {
for (col in 0..2) {
if ((row == 0 && col == 0) || (row == 2 && col == 2)) assertFalse(pads[row][col].isVisible)
else assertTrue(pads[row][col].isVisible)
}
}
}
}<file_sep>package com.khnsoft.follownumber.game.model
import org.junit.Assert.*
import org.junit.Before
import org.junit.Test
import org.mockito.Mock
import org.mockito.Mockito.*
class GameTest {
private lateinit var game: Game
@Mock
private lateinit var pads: Pads
@Before
fun setup() {
game = spy(Game())
pads = mock(Pads::class.java)
}
@Test
fun currentPads_afterInitialize_containsOneToNine() {
val pads = (0..2).flatMap { row -> (0..2).map { col -> game.curPads[row][col] } }
(1..9).forEach {
assertTrue(pads.contains(Pad(it)))
}
}
@Test
fun nextPads_afterInitialize_containsTenToEighteen() {
val pads = (0..2).flatMap { row -> (0..2).map { col -> game.nextPads[row][col] } }
(10..18).forEach {
assertTrue(pads.contains(Pad(it)))
}
}
@Test
fun onPadClick_clickCorrectPad_scoreIncrease() {
`when`(game.curPads).thenReturn(pads)
`when`(pads[0]).thenReturn(listOf(Pad(1), Pad(2), Pad(3)))
val oldScore = game.score
game.onPadClick(0, 0)
val newScore = game.score
assertTrue(newScore > oldScore)
}
@Test
fun onPadClick_clickIncorrectPad_scoreFreeze() {
`when`(game.curPads).thenReturn(pads)
`when`(pads[0]).thenReturn(listOf(Pad(1), Pad(2), Pad(3)))
val oldScore = game.score
game.onPadClick(0, 1)
val newScore = game.score
assertEquals(oldScore, newScore)
}
}<file_sep>package com.khnsoft.follownumber.game.view
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import androidx.activity.viewModels
import androidx.databinding.DataBindingUtil
import com.khnsoft.follownumber.R
import com.khnsoft.follownumber.databinding.ActivityGameBinding
import com.khnsoft.follownumber.game.model.Game
import com.khnsoft.follownumber.game.viewmodel.GameViewModel
import com.khnsoft.follownumber.game.viewmodel.GameViewModelFactory
class GameActivity : AppCompatActivity() {
private lateinit var binding: ActivityGameBinding
private val viewModel: GameViewModel by viewModels { GameViewModelFactory(Game()) }
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
binding = DataBindingUtil.setContentView(this, R.layout.activity_game)
binding.lifecycleOwner = this
binding.viewModel = viewModel
binding.tvPad00.setOnClickListener { viewModel.onPadClick(0, 0) }
binding.tvPad01.setOnClickListener { viewModel.onPadClick(0, 1) }
binding.tvPad02.setOnClickListener { viewModel.onPadClick(0, 2) }
binding.tvPad10.setOnClickListener { viewModel.onPadClick(1, 0) }
binding.tvPad11.setOnClickListener { viewModel.onPadClick(1, 1) }
binding.tvPad12.setOnClickListener { viewModel.onPadClick(1, 2) }
binding.tvPad20.setOnClickListener { viewModel.onPadClick(2, 0) }
binding.tvPad21.setOnClickListener { viewModel.onPadClick(2, 1) }
binding.tvPad22.setOnClickListener { viewModel.onPadClick(2, 2) }
}
}<file_sep>package com.khnsoft.follownumber.game.model
class Pads(startNum: Int) {
private val pads: MutableList<Pad> = (0 until 9).map { Pad(startNum + it) }.shuffled().toMutableList()
val size: Int get() = pads.size
operator fun get(idx: Int) = pads.subList(idx*3, (idx+1)*3).toList()
fun clear(row: Int, col: Int) {
pads[row * 3 + col] = pads[row * 3 + col].copy(isVisible = false)
}
}
|
2f3067416ed03462ed2e974d4154bf1f20e6ed7a
|
[
"Markdown",
"Kotlin"
] | 11 |
Kotlin
|
svclaw2000/Follow-Number-For-Study
|
a8fdf36746ccfee97db379fa4980086a00a831a7
|
0690963b827183ffece6c9d392d49d55856a20bc
|
refs/heads/master
|
<file_sep>/*
cpphelloworld.cpp
TP#0 "Hello World!" - AED_2017_K1051
<NAME>, Leg. 162821
V_20170326.0
*/
#include <iostream>
using namespace std;
int main() {
cout << "Hello World!";
return 0;
}<file_sep># CppHelloWorld.
TP #0 _ AED _ 2017 K1051
|
1d666c0c6e6e07ef9c96e1ca4ebc2e5e876beb8d
|
[
"Markdown",
"C++"
] | 2 |
C++
|
mollonr/CppHelloWorld.
|
742cd8f27494520fa740523949211703fd10d649
|
79f493a0f0dad78b6315a2b9ba34250ef4ecd983
|
refs/heads/master
|
<repo_name>DevNasti/3dController-Godot<file_sep>/Characters/CameraController.cs
using Godot;
using System;
public class CameraController : Spatial
{
private float mouseSens = .5f;
private float xRotation, yRotation;
private float upAngle = 90;
private float feetAngle = -70;
// Called when the node enters the scene tree for the first time.
public override void _Ready()
{
SetAsToplevel(true);
}
public override void _Input(InputEvent @event)
{
base._Input(@event);
if (@event is InputEventMouseMotion eventMouseMotion)
{
yRotation = RotationDegrees.y - (mouseSens * eventMouseMotion.Relative.x);
xRotation = RotationDegrees.x - (mouseSens * eventMouseMotion.Relative.y);
if (xRotation > upAngle)
xRotation = upAngle;
if (xRotation < feetAngle)
xRotation = feetAngle;
RotationDegrees = new Vector3(xRotation, yRotation, RotationDegrees.z);
}
}
}
<file_sep>/Characters/Player.cs
using Godot;
using System;
public class Player : RigidBody
{
private float moveSpeed = 5f;
private float jumpForce = 5f;
RayCast rayCast;
Camera cameraRig;
Spatial cameraPivot;
private bool _jump;
private bool _isGrounded;
private float hz;
private float vrt;
private Transform playerTransform;
private float rotationSpeed = 300;
public override void _Ready()
{
Input.SetMouseMode(Input.MouseMode.Captured);
//cameraPivot = GetNode<Spatial>("Spatial_camera");
//cameraRig = cameraPivot.GetChild<Camera>(0);
_jump = false;
//anim = GetNode<AnimationPlayer>("AnimationPlayer");
//rayCast = GetNode<Camera>("Camera").GetChild<RayCast>(0);
}
// Called every frame. 'delta' is the elapsed time since the previous frame.
public override void _Process(float delta)
{
hz = Input.GetActionStrength("right") - Input.GetActionStrength("left");
vrt = Input.GetActionStrength("up") - Input.GetActionStrength("down");
if(vrt != 0 || hz != 0)
GD.Print($"{hz} - {vrt}");
if (Input.IsActionJustPressed("Jump")) //&& _isGrounded)
{
_jump = true;
}
IsGrounded();
// First rotate the player
RotatePlayer(delta);
// Then move in the direction the player is facing
MovePlayer(delta);
// Then try to jump
Jump();
// if (hz != 0 || vrt != 0)
// {
// AnimatePlayer();
// }
// else
// {
// animator.SetFloat("Forward", 0, .1f, Time.fixedDeltaTime);
// }
}
private void RotatePlayer(float fixedDeltaTime)
{
// Get the input (pad) angle
var angle = Mathf.Rad2Deg(Mathf.Atan2(hz, vrt));
// Add the camera angle to it
angle += cameraRig.Rotation.y;
// Wrap the angle using the remainder of division by 360
angle %= 360;
// Lerp it toward the current rotation
angle = Mathf.Lerp(angle, Rotation.y, fixedDeltaTime / rotationSpeed);
// Rotate the character only if there is any axis input
if (hz != 0 || vrt != 0)
{
GD.PrintErr($"about to rotate to {angle}");
// _isMovingHorizontally = true;
RotateY(angle);
}
//else _isMovingHorizontally = false;
}
void MovePlayer(float deltaTime)
{
//if (hz != 0 || vrt != 0)
//{
// //var moveVector = (transform.forward * moveSpeed * deltaTime);
// //// rb.AddForce(moveVector, ForceMode.Impulse);
// //rb.MovePosition(transform.position + moveVector);
// rb.LinearVelocity = moveSpeed * deltaTime * Vector3.Forward;
//}
//else
//{
// rb.LinearVelocity = new Vector3(0, rb.LinearVelocity.y, 0);
//}
}
void Jump()
{
//JUMPING
if (_jump)
{
ApplyImpulse(playerTransform.origin, Vector3.Up * jumpForce);
// animator.SetTrigger("Jump");
_jump = false;
}
}
private void IsGrounded()
{
var center = playerTransform.origin + Vector3.Up;
//_isGrounded = (Physics.Raycast(center, Vector3.down, raycastDistanceForGround));
_isGrounded = true;
}
}
//void AnimatePlayer()
//{
// //the animations are handled through a blend tree: the higher the speed (set with SetFloat), the closer the player gets to a running animation.
// if (_isMovingHorizontally && _isGrounded)
// {
// animator.SetFloat("Forward", 1, .1f, Time.deltaTime);
// }
// else animator.SetFloat("Forward", 0, .1f, Time.deltaTime);
//}
|
e3c29c2fed4a092fdb2969a92d7d9e476eb0a53b
|
[
"C#"
] | 2 |
C#
|
DevNasti/3dController-Godot
|
dda46759891541b4c615e078311893452517422a
|
80cfbc73fb8ab1debf189079aab79de11c3f3295
|
refs/heads/master
|
<file_sep>Protocol Sniffing
--
1. Setup iptables
```bash
docker exec -i --privileged -t -u root {container id} bash
```
```bash
./istio-iptables.sh -p 15001 -u 1337 -m REDIRECT -i '*'
```
2. Check iptable rules
```bash
iptables -n -v -t nat -L
```
3. Verify
- `curl google.com`
```console
[2019-07-22 21:43:53.636][14][debug][filter] [source/extensions/filters/listener/original_dst/original_dst.cc:18] original_dst: New connection accepted
[2019-07-22 21:43:53.636][14][debug][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:72] tls inspector: new connection accepted
[2019-07-22 21:43:53.636][14][trace][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:141] tls inspector: recv: 74
[2019-07-22 21:43:53.636][14][trace][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:162] tls inspector: done: true
[2019-07-22 21:43:53.636][14][debug][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:32] http inspector: new connection accepted
[2019-07-22 21:43:53.636][14][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:63] http inspector: recv: 74
[2019-07-22 21:43:53.636][14][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:103] http inspector: method: GET, request uri: /, protocol: HTTP/1.1
[2019-07-22 21:43:53.636][14][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:164] http inspector: done: true
```
- `curl localhost:8000`
infinite loop (known issue)
```console
[2019-07-22 21:46:11.120][34][debug][filter] [source/extensions/filters/listener/original_dst/original_dst.cc:18] original_dst: New connection accepted
[2019-07-22 21:46:11.120][34][debug][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:72] tls inspector: new connection accepted
[2019-07-22 21:46:11.121][34][trace][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:141] tls inspector: recv: 214
[2019-07-22 21:46:11.121][34][trace][filter] [source/extensions/filters/listener/tls_inspector/tls_inspector.cc:162] tls inspector: done: true
[2019-07-22 21:46:11.121][34][debug][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:32] http inspector: new connection accepted
[2019-07-22 21:46:11.121][34][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:63] http inspector: recv: 214
[2019-07-22 21:46:11.121][34][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:103] http inspector: method: GET, request uri: /, protocol: HTTP/1.1
[2019-07-22 21:46:11.121][34][trace][filter] [source/extensions/filters/listener/http_inspector/http_inspector.cc:164] http inspector: done: true
```
<file_sep>#!/bin/bash
if [ "$1" == "" ]; then
echo "specify folder"
ls $(pwd) -F
exit 0
fi
dir="$(pwd)/$1"
cd ~/envoy && bazel build //source/exe:envoy-static && cd $dir
pwd
cp -f ~/envoy/bazel-bin/source/exe/envoy-static ./
docker-compose down
docker-compose up --build -d
docker ps
<file_sep>version: "3.7"
services:
client-envoy:
build:
context: .
dockerfile: Dockerfile-client
volumes:
- ./client-envoy.yaml:/etc/client-envoy.yaml
- ./envoy-static:/usr/local/bin/envoy
networks:
- envoymesh
expose:
- "8000"
- "8001"
ports:
- "8000:8000"
- "8001:8001"
networks:
envoymesh: {}
<file_sep>version: "3.7"
services:
server-envoy:
build:
context: .
dockerfile: Dockerfile-server
volumes:
- ./server-envoy.yaml:/etc/server-envoy.yaml
- ./envoy-static:/usr/local/bin/envoy
networks:
envoymesh:
aliases:
- server
expose:
- "443"
- "80"
- "8001"
ports:
- "8000:80"
- "8888:443"
- "8001:8001"
httpbin:
image: kennethreitz/httpbin
networks:
envoymesh:
aliases:
- httpbin
ports:
- "8080:80"
networks:
envoymesh: {}
|
b6739873f897a33d8bb85fe053640e4cbf3947f6
|
[
"Markdown",
"YAML",
"Shell"
] | 4 |
Markdown
|
yxue/envoy-example
|
f37000435172ddfa757d3c73d5b0722cafd00773
|
cb3962dbd7e955cf1dc6843375a6505198380c27
|
refs/heads/master
|
<repo_name>saadsalam/WeatherApp<file_sep>/WeatherApp/WeatherApp/Interfaces/IWeatherService.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using WeatherApp.Models;
using Refit;
namespace WeatherApp.Interfaces
{
public interface IWeatherService
{
[Get("/data/2.5/weather?zip={zipcode}&appid={appid}&units=imperial")]
Task<WeatherModel> GetTemp(string zipcode, string appid);
}
}
<file_sep>/WeatherApp/WeatherApp/Models/SavedLocation.cs
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Text;
using SQLite;
using SQLite.Net.Attributes;
namespace WeatherApp.Models
{
public class SavedLocation: INotifyPropertyChanged
{
public event PropertyChangedEventHandler PropertyChanged;
[PrimaryKey, AutoIncrement]
public int ID { get; set; }
public string Zip { get; set; }
public string Name { get; set; }
public double Temp { get; set; }
}
}
<file_sep>/WeatherApp/WeatherApp/MainPage.xaml.cs
using System;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using WeatherApp.Interfaces;
using Xamarin.Forms;
namespace WeatherApp
{
public partial class MainPage : ContentPage
{
IWeatherService _weatherService;
private static string apikey = "e63ae9a5453e4cec0d82c6c75131139e";
public MainPage()
{
InitializeComponent();
_weatherService = RestService.For<IWeatherService>("http://api.openweathermap.org");
}
protected async void GetWeather(object sender, EventArgs args)
{
var search = (SearchBar)sender;
if (!string.IsNullOrEmpty(search.Text))
{
BindingContext = await _weatherService.GetTemp(search.Text, apikey);
}
}
}
}
|
aaa10343e54b95e968452999d243eed33e803f15
|
[
"C#"
] | 3 |
C#
|
saadsalam/WeatherApp
|
8cfa61d733bcfc0f4fbfe2d51311bf37fd1b6d24
|
0c09c3b9f89e997246bb93808583293d5c4d0b02
|
refs/heads/master
|
<file_sep>"# FinalSD"
<file_sep>var client;
client = mqtt.connect("wss://test.mosquitto.org:8081/mqtt");
console.log("hello world");
client.on("message", (topic, payload) => {
console.log("Received { topic:" + topic + "; payload: " + payload + " }");
switch (topic) {
case "smoke/sensor":
// payload arrives as a Buffer, so compare its string value
if (payload.toString() === "1"){
$("#grocerry").css("visibility", "hidden").val(payload);
}else{
$("#grocerry").css("visibility", "visible").val(payload);
}
break;
}
});
client.subscribe({
"smoke/sensor": { qos: 0 },
// "amabel": { qos: 0 },
// "rosedaine": { qos: 0 },
// "pasores/temperature": { qos: 0 },
// "sample": { qos: 0 },
// "renzyclaire": { qos: 0 },
// "Luz-Ian/temp": { qos: 0 },
});
|
97d117f31bff080fd7e7b8643163f8c2a6ade565
|
[
"Markdown",
"JavaScript"
] | 2 |
Markdown
|
mae-yosores2k19/FinalSD
|
27e2de1ef40eb17b58af99198237ab5e3e91800b
|
139b8d430146b8e7249bb565be0f8d955cd2721b
|
refs/heads/master
|
<repo_name>ELTono/Selected-Topics-Statistics<file_sep>/task_a5.py
'''
This file contains code for task a5
'''
import numpy as np
from tabulate import tabulate
from scipy.stats import norm
import task_a2
def do_htest(dfa, dfb, label1, label2):
'''
dfa and dfb are dataframes with the same column headings
performs a two-sample z-test for each feature, testing both one-sided alternatives
'''
alpha = 0.05
headers = ["feature", label1+" p value", label1+" Result", label2+" p value", label2+" Result"]
result = []
for feature in dfa.columns[0:-1]:
x1 = np.mean(dfa[feature])
x2 = np.mean(dfb[feature])
v1 = np.var(dfa[feature], ddof=1)/dfa.shape[0]
v2 = np.var(dfb[feature], ddof=1)/dfb.shape[0]
# z-statistic
t = (x1-x2)/np.sqrt(v1+v2)
# p-value for 1 > 2
p_val_1g2 = 1 - norm.cdf(t)
if p_val_1g2 < alpha:
h_val_1g2 = "T"
else:
h_val_1g2 = "-"
# p-value for 1 < 2
p_val_2g1 = norm.cdf(t)
if p_val_2g1 < alpha:
h_val_2g1 = "T"
else:
h_val_2g1 = "-"
result.append([feature, p_val_1g2, h_val_1g2, p_val_2g1, h_val_2g1])
print(tabulate(result, headers=headers, floatfmt="0.4f", tablefmt="latex"))
if __name__ == "__main__":
# get the wine data frame (wdf)
wdf = task_a2.get_data("winequality")
# find distinguishing features of good wine
do_htest(wdf[wdf.quality > 6], wdf[wdf.quality <= 6], label1="G>O:", label2="G<O:")
<file_sep>/task_a4.py
'''
run this module as a script to conduct benchmarking experiments
also performs hypothesis tests to find the best model
note:
1. regressors and classifiers are compared separately
2. all estimators that need hyper-parameter tuning are wrapped
with GridSearchCV (with refit=True) and the whole strategy is
validated in the outer loop
3. more details in the report
'''
import os
# data manipulation utilities
import pickle
import pandas as pd
from tabulate import tabulate
# numeric computations
import numpy as np
# sklearn utilities and estimators
from sklearn import metrics
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV, KFold, cross_validate
from sklearn.dummy import DummyRegressor, DummyClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVR, SVC
from sklearn.neural_network import MLPRegressor, MLPClassifier
from sklearn.ensemble import BaggingRegressor, BaggingClassifier
# stat utils
from scipy.stats import norm
# reusing code from other tasks
import task_a2
def get_estimators(variety):
'''
all estimators under consideration are defined here
returns regressors or classifiers as per request
variety argument takes "regressor" or "classifier" as values
'''
# check validity of input argument
if variety != "regressor" and variety != "classifier":
raise ValueError("Unrecognized variety of estimator")
# initialize list of regressors
r_est = [] # regressors
c_est = [] # classifiers
# Dummy Regressor
combo = DummyRegressor()
r_est.append((combo, "DR"))
# Linear Regressor
param_grid = {
"fit_intercept": [True, False]
}
combo = GridSearchCV(LinearRegression(),
param_grid,
refit=True,
scoring="neg_mean_squared_error",
n_jobs=-1)
r_est.append((combo, "LR"))
# SVR
param_grid = {
"C": [0.1, 1, 10],
}
# uses RBF kernel by default
combo = GridSearchCV(SVR(),
param_grid,
refit=True,
scoring="neg_mean_squared_error",
n_jobs=-1)
r_est.append((combo, "SVR"))
# neural_network Regressor
param_grid = {
"hidden_layer_sizes": [(32, 32,), (16, 16, 16,)],
"learning_rate_init": [0.01, 0.001]
}
combo = GridSearchCV(MLPRegressor(),
param_grid,
refit=True,
scoring="neg_mean_squared_error",
n_jobs=-1)
r_est.append((combo, "NNR"))
# Ensemble Regressor
combo = BaggingRegressor(base_estimator=None, # uses trees by default
n_estimators=20, # at least 10 as per instructions
n_jobs=-1)
r_est.append((combo, "ER"))
# Dummy Classifier
combo = DummyClassifier()
c_est.append((combo, "DC"))
# Logistic Classifier
param_grid = {
"fit_intercept": [True, False],
"C": [0.1, 1, 10]
}
combo = GridSearchCV(LogisticRegression(),
param_grid,
refit=True,
n_jobs=-1)
c_est.append((combo, "LC"))
# SVC
param_grid = {
"C": [0.1, 1, 10]
}
# uses RBF kernel by default
combo = GridSearchCV(SVC(),
param_grid,
refit=True,
n_jobs=-1)
c_est.append((combo, "SVC"))
# neural_network classifier
param_grid = {
"hidden_layer_sizes": [(32, 32,), (16, 16, 16,)],
"learning_rate_init": [0.01, 0.001]
}
combo = GridSearchCV(MLPClassifier(),
param_grid,
refit=True,
n_jobs=-1)
c_est.append((combo, "NNC"))
# Ensemble Classifier
combo = BaggingClassifier(base_estimator=None, # uses trees by default
n_estimators=20, # at least 10 as per instructions
n_jobs=-1)
c_est.append((combo, "EC"))
# group estimators and return as per variety
if variety == "regressor":
return r_est
return c_est
def cond_mean(truth, preds):
'''
custom scorer
gives mean of squared error conditioned on training split
'''
sq_diff = (truth - preds)**2
return np.mean(sq_diff)
def cond_var(truth, preds):
'''
custom scorer
gives variance of MSE conditioned on training split
'''
sq_diff = (truth - preds)**2
return (np.var(sq_diff, ddof=1))/truth.shape[0]
def cond_01_mean(truth, preds):
'''
custom scorer
gives mean of the 0/1 loss conditioned on training split
'''
loss = (truth != preds).astype(float)
return np.mean(loss)
def cond_01_var(truth, preds):
'''
custom scorer
gives variance of the mean 0/1 loss conditioned on training split
'''
loss = (truth != preds).astype(float)
return (np.var(loss, ddof=1))/truth.shape[0]
def get_estimator_performance(e_dat_list, score_dict, cv_split):
'''
e_dat: dict of estimator and data
score_dict: scores for cval
cv_split: consistent split for cval
returns the score as tuples for all e_dat
'''
rval = []
for e_dat in e_dat_list:
print("Training: "+e_dat["name"])
buff = {}
score = cross_validate(e_dat["estimator"],
e_dat["x data"],
e_dat["y data"],
scoring=score_dict,
return_train_score=False,
cv=cv_split)
# fill the results buffer
buff["name"] = e_dat["name"]
# the means are still estimates conditional on
# training set (but they have low variance)
buff["score"] = [np.mean(score["test_"+str(k)]) for k in score_dict]
rval.append(buff)
return rval
def save_score(fname):
'''
the main function:
runs the benchmarking experiments on the wine data and pickles
the cross-validation scores to "<kind>.<fname>"
'''
print("Importing Data...")
wdf = task_a2.get_data("winequality")
# log transform
transform_list = [
'fixed acidity',
'citric acid',
'volatile acidity',
'residual sugar',
'chlorides',
'free sulfur dioxide',
'total sulfur dioxide',
'sulphates',
]
lwdf = wdf.copy()
# put a shift (0.5*(leastcount=10e-2)) on citric_acid
lwdf["citric acid"] = lwdf["citric acid"]+10e-2*0.5
lwdf[transform_list] = np.log(lwdf[transform_list])
wdf = lwdf
print("Processing Data...")
# shuffle data
wdf = shuffle(wdf)
# get dummy variables
wdf = pd.get_dummies(wdf)
# reuse the same CV split for all models
cv_split = KFold(n_splits=5)
# separate features and targets
y_data = wdf["quality"]
x_data = wdf.iloc[:, 1:]
# for both regression and classification
for kind in ["classifier", "regressor"]:
print("Seting up the necessary "+kind+"s...")
ests = get_estimators(kind)
# create a dictionary of scorers
if kind == "regressor":
score_dict = {
"mean": metrics.make_scorer(cond_mean),
"var": metrics.make_scorer(cond_var)
}
else:
score_dict = {
"mean01": metrics.make_scorer(cond_01_mean),
"var01": metrics.make_scorer(cond_01_var)
}
# column masks for features
fea_masks = ["", "CHM", "COL"]
# list to hold estimators and corresponding data
test_list = []
for est in ests:
for mask in fea_masks:
buff = {}
buff["name"] = est[1]+" "+mask
buff["estimator"] = est[0]
if mask == "": # all features
buff["x data"] = x_data
elif mask == "CHM": # only chemical composition
buff["x data"] = x_data.iloc[:, 0:-2]
elif mask == "COL": # only color
buff["x data"] = x_data.iloc[:, -2:]
else:
raise ValueError("Unknown mask specification")
buff["y data"] = y_data
test_list.append(buff)
score = get_estimator_performance(test_list,
score_dict,
cv_split)
# save scores to file
with open(kind+"."+fname, 'wb') as handle:
pickle.dump(score, handle)
def tabulate_results(fnames):
'''
reads the files and tabulates results
this is not a reusable function
much of it is hard-coded
'''
for fname in fnames:
if fname == "classifier.score":
print("Comparison of Regressors\n")
# prep for 95% CI calculation
alpha = 0.05
z_val = norm.ppf(alpha/2)
# read scores from file
with open(fname, 'rb') as handle:
score = pickle.load(handle)
table = []
for dic in score:
table.append(
[dic["name"],
*dic["score"],
"(" +
f"{dic['score'][0]+z_val*np.sqrt(dic['score'][1]):.4f}" +
", " +
f"{dic['score'][0]-z_val*np.sqrt(dic['score'][1]):.4f}" +
")"])
print(tabulate(table,
headers=["Classifier",
"Mean(0/1 loss)",
"Var(Mean(0/1 loss))",
"95% Confidence Interval"],
tablefmt="latex",
floatfmt=".6f"))
print("\n\n")
if fname == "regressor.score":
print("Comparison of Regressors\n")
# prep for 95% CI calculation
alpha = 0.05
z_val = norm.ppf(alpha/2)
# read scores from file
with open(fname, 'rb') as handle:
score = pickle.load(handle)
table = []
for dic in score:
table.append(
[dic["name"],
*dic["score"],
"(" +
f"{dic['score'][0]+z_val*np.sqrt(dic['score'][1]):.4f}" +
", " +
f"{dic['score'][0]-z_val*np.sqrt(dic['score'][1]):.4f}" +
")"])
print(tabulate(table,
headers=["Regressor",
"Mean(SE)",
"Variance(MSE)",
"95% Confidence Interval"],
tablefmt="latex",
floatfmt=".6f"))
print("\n\n")
def get_p_table(fname):
'''
this function is only for regressor scores
it does the following
1. reads the given file
2. creates a table with p-value comparing every model
with every other model
3. p values are calculated using a std.normal as null distribution
and by taking 1-cdf(diff.means/sqrt(sum(var_means)))
'''
if fname == "regressor.score":
print("\n\nComparing Regressors")
elif fname == "classifier.score":
print("\n\nComparing Clasifiers")
else:
raise ValueError("cant process this file")
# hypothesis testing parameters
alpha = 0.05
print("Testing Hypotheses with alpha = "+str(alpha))
print("Alternative hypothesis is loss of row index < loss of column index")
with open(fname, 'rb') as handle:
score = pickle.load(handle)
# initialize the result list
result1 = []
result2 = []
header = [" "]
for dic1 in score:
# initialize the outer loop buffer
out_buff1 = []
out_buff2 = []
out_buff1.append(dic1["name"])
out_buff2.append(dic1["name"])
for dic2 in score:
# find p value
h_mean = dic2["score"][0] - dic1["score"][0] # subtract mean
h_sd = np.sqrt(dic1["score"][1] + dic2["score"][1]) # add var
p_val = 1-norm.cdf(h_mean/h_sd) # as mean as per H0 is 0
# test hypothesis
if p_val < alpha:
h_result = "T"
else:
h_result = "-"
in_buff1 = f"{p_val:.2f}"
in_buff2 = h_result
# add result to outer buffer
out_buff1.append(in_buff1)
out_buff2.append(in_buff2)
# add out buffer to result
result1.append(out_buff1)
result2.append(out_buff2)
# fill the header
header.append(dic1["name"])
print(tabulate(result1, headers=header, tablefmt="latex"))
print(tabulate(result2, headers=header, tablefmt="latex"))
if __name__ == "__main__":
if not (os.path.exists("classifier.score") and
os.path.exists("regressor.score")):
save_score("score")
# report results
tabulate_results(["classifier.score", "regressor.score"])
# perform hypothesis testing
get_p_table("regressor.score")
get_p_table("classifier.score")
<file_sep>/README.md
# Selected-Topics-Statistics
A statistical report on the "Wine Quality Dataset".
The Wine Quality Dataset records, for different samples of white (winequality-white.csv)
and red wine (winequality-red.csv), the following variables:
1. fixed acidity = mass concentration of tartaric acid (g/dm3)
2. volatile acidity = mass concentration of acetic acid (g/dm3)
3. mass concentration of citric acid (g/dm3)
4. residual sugar mass concentration (g/dm3)
5. mass concentration of sodium chloride (g/dm3)
6. mass concentration of free sulfur dioxide (mg/dm3)
7. mass concentration of sulfur dioxide total (mg/dm3)
8. density (g/cm3)
9. pH value
10. mass concentration of potassium sulphate (mg/dm3)
11. alcohol content (vol%)
and additionally, a median sensory preference from up to three sensory assessors
("sommeliers" in the scenario) following blind sensory assessment on a subjective
scale of 0 (disgusting) to 10 (excellent).
The main task was to complete a report for a fictional food AI start-up (the final report corresponds to the "Wine dataset report" PDF), which wants us to answer the following questions:
1. Can we use the sommelier/wine data to create an AI with super-human performance in wine tasting?
2. Which components of wine make a wine a good wine?
3. Can the AI use the data to create the perfect wine, i.e., wine whose quality
exceeds all that we have seen?
This report is divided into several tasks:
A2: Join the two datasets into a single one that contains the color as a feature.
A3: Perform exploratory analysis on the new data set.
A4: Conduct predictive benchmarking experiments to determine:
(i) whether, and how well, wine quality can be predicted from chemical composition and colour;
(ii) whether wine colour adds predictive power above chemical composition and
vice versa, in (i).
For the purposes of this report, all the code was written in Python using libraries such as scikit-learn and NumPy.
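
To make the A4 setup concrete, here is a minimal, hedged sketch of the benchmarking idea rather than the repository's actual `task_a4.py` (which covers many more estimators and scorers): hyper-parameter tuning happens inside `GridSearchCV`, the tuned strategy as a whole is validated by an outer `cross_validate`, and the run is repeated for three feature subsets (all features, chemical composition only, colour only). It assumes `task_a2.get_data("winequality")` from this repository is importable; everything else is standard scikit-learn.

```python
# Minimal sketch of the nested benchmarking loop (not the repository's task_a4.py).
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, KFold, cross_validate
from sklearn.svm import SVR

import task_a2  # assumes this repository's data loader is importable

# Load the joined red/white data and one-hot encode the colour column
wdf = pd.get_dummies(task_a2.get_data("winequality"))
y = wdf["quality"]
X = wdf.drop(columns=["quality"])

# Inner loop: tune C of an SVR; outer loop: 5-fold CV of the tuned strategy
model = GridSearchCV(SVR(), {"C": [0.1, 1, 10]},
                     scoring="neg_mean_squared_error", refit=True)
cv = KFold(n_splits=5, shuffle=True, random_state=0)

# Feature subsets: everything, chemistry only, colour dummies only
masks = {
    "all": list(X.columns),
    "chemistry": [c for c in X.columns if not c.startswith("color_")],
    "colour": [c for c in X.columns if c.startswith("color_")],
}
for name, cols in masks.items():
    scores = cross_validate(model, X[cols], y, cv=cv,
                            scoring="neg_mean_squared_error")
    print(name, "mean MSE:", -np.mean(scores["test_score"]))
```

The point of the nested structure is that tuning is re-run inside every outer fold, so the reported error is not biased by hyper-parameter selection; comparing the three subsets addresses question (ii) above.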
<file_sep>/task_a3.py
'''
This file was actually converted from a notebook
contains code for EDA
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from tabulate import tabulate
import task_a2
import task_a5
def main():
'''
main function like in C
'''
# toggle for saving (and showing) images
save = False
# get the wine data frame (wdf)
wdf = task_a2.get_data("winequality")
# Numerical Summaries
header = [" ", "count", "mean", "std", "min", "25%", "50%", "75%", "max"]
print(tabulate(wdf.describe().T, headers=header, tablefmt="latex"))
# plot histograms
if save is True:
plt.figure(figsize=(20, 5))
filter_list = wdf.color != ""
for i, w_color in enumerate(["", "red", "white"]):
if i > 0:
filter_list = wdf.color == w_color
plt.subplot(1, 3, i+1)
plt.hist(wdf[filter_list].quality,
bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
align="left")
plt.title(str(w_color)+" wine quality histogram")
plt.xlabel("Wine Quality")
plt.ylabel("Count")
plt.savefig("histogram.jpg")
plt.show()
# Make box plots
if save is True:
i = 1
fig = plt.figure(figsize=(10, 10))
for feature in wdf:
if feature != "color":
ax = fig.add_subplot(4, 3, i)
ax.boxplot([wdf[feature][wdf.color == "red"],
wdf[feature][wdf.color == "white"]])
plt.setp(ax, xticklabels=['red', 'white'])
ax.set_ylabel(feature)
i += 1
plt.tight_layout()
plt.savefig("barplot.jpg")
plt.show()
if save is True:
# matrix scatter plot
# color coding for wine
color_dict = {"red":"red", "white":"blue"}
# getting the color list
color_list = [color_dict[c] for c in wdf.color]
# matrix scatter plot
scatter_matrix(wdf, alpha=0.01, figsize=(20, 30),
diagonal='density', color=color_list)
plt.savefig("mat_scatter.jpg")
plt.show()
# log transformation
transform_list = [
'fixed acidity',
'citric acid',
'volatile acidity',
'residual sugar',
'chlorides',
'free sulfur dioxide',
'total sulfur dioxide',
'sulphates',
]
print("Least count")
np.min(wdf["citric acid"][wdf["citric acid"] > 0])
lwdf = wdf.copy()
# put a shift (0.5*leastcount) on citric_acid (10e-2 is the typical LC)
lwdf["citric acid"] = lwdf["citric acid"]+10e-2*0.5
lwdf[transform_list] = np.log(lwdf[transform_list])
print(tabulate(lwdf.describe().T, headers=header, tablefmt="latex"))
if save is True:
# log - matrix scatter plot
# color coding for wine
color_dict = {"red": "red", "white": "blue"}
# getting the color list
color_list = [color_dict[c] for c in lwdf.color]
# matrix scatter plot
scatter_matrix(lwdf, alpha=0.01, figsize=(20, 30), diagonal='density', color=color_list)
plt.savefig("log_mat_scatter.jpg")
plt.show()
# rough calculations for quantitative EDA
from sklearn.metrics import silhouette_score
print("Red wine mean quality: ", np.mean(wdf["quality"][wdf.color == "red"]))
print("White wine mean quality: ", np.mean(wdf["quality"][wdf.color == "white"]))
print("Red wine with quality more than 8: ", len(wdf["quality"][np.logical_and(wdf.quality > 8, wdf.color == "red")]))
twdf = pd.get_dummies(wdf)
print("Silhoutte score as per color labels", silhouette_score(twdf.iloc[:,0:-2], twdf.iloc[:,-1]))
print("Silhoutte score as per random labels", silhouette_score(twdf.iloc[:,0:-2], np.random.randint(0,2, size=(twdf.shape[0]))))
result = []
header = [" "]
for i in lwdf.columns[1:-1]:
buf = [i]
header.append(i)
for j in lwdf.columns[1:-1]:
buf.append(np.corrcoef(lwdf[i], lwdf[j])[1,0])
result.append(buf)
print(tabulate(result, headers=header, tablefmt="latex", floatfmt="0.3f"))
# comparing red and white wine
task_a5.do_htest(wdf[wdf.color == "red"], wdf[wdf.color == "white"], label1="R>W:", label2="R<W:")
if __name__ == "__main__":
main()
<file_sep>/task_a2.py
'''
This file contains code for importing data
'''
# necessary modules
import os
import pandas as pd
def get_data(folder, colors=("white", "red")):
'''
locates the following files ( by default) in the folder taken as argument
winequality-white.csv
winequality-red.csv
The data on quality of red-wine and white-wine are aggregated
into a super data set. This is returned as a pandas dataframe
Optionally colors can also be given as an argument
'''
# initialize pandas df to hold the dataset
wine_df = pd.DataFrame()
# read data for each color and append
for color in colors:
# get the file paths
file_path = os.path.join(folder,
"winequality-"+color+".csv")
# import the CSVs as pandas dataframe
temp_df = pd.read_csv(file_path, sep=";")
# add the color variable
temp_df.loc[:, "color"] = color
# append to the existing datatrame
wine_df = wine_df.append(temp_df,
ignore_index=True,
verify_integrity=True)
# rearrange to make quality the first column
wine_df = wine_df[["quality"] +
[col for col in wine_df if col != "quality"]]
# return the aggregated dataframe
return wine_df
if __name__ == "__main__":
'''
run testing code if run as a script
'''
WINE_DF = get_data("winequality")
# check shape
if WINE_DF.shape == (6497, 13):
print("The shape of the DF is as expected")
else:
print("Shape test FAILED")
# check red count
if WINE_DF.color[WINE_DF.color == "red"].count() == 1599:
print("Red wine count is as expected")
else:
print("Red count test FAILED")
# check white count
if WINE_DF.color[WINE_DF.color == "white"].count() == 4898:
print("White wine count is as expected")
else:
print("White count test FAILED")
|
7a62a27ba7ce93b8c9c34c8090558c8f6dc845b9
|
[
"Markdown",
"Python"
] | 5 |
Python
|
ELTono/Selected-Topics-Statistics
|
fa6de6c1e2668cc748207352d595baf637382a79
|
e696b6083ff0b7d76c688455271190502653c944
|
refs/heads/master
|
<repo_name>viocax/stateMachine<file_sep>/stateMachine/Models.swift
//
// Models.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import Foundation
struct Models<T: Codable>: Codable {
let results: T
}
struct Info: Codable {
var email: String?
var gender: String?
}
<file_sep>/stateMachine/State/LoadingState.swift
//
// LoadingState.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import Foundation
import Combine
import GameplayKit
class LoadingState: ViewControllerState {
enum ErrorCase: Error {
case noData, missURL, someError(Error)
}
private var cannel: Set<AnyCancellable> = Set<AnyCancellable>()
private var indicatorView: UIActivityIndicatorView = {
let indicatorView = UIActivityIndicatorView(style: UIActivityIndicatorView.Style.medium)
indicatorView.translatesAutoresizingMaskIntoConstraints = false
return indicatorView
}()
private var userCase: UserCase?
convenience init(_ viewController: ViewController, userCase: UserCase) {
self.init(viewController)
self.userCase = userCase
}
override func didEnter(from previousState: GKState?) {
view.addSubview(indicatorView)
indicatorView.centerYAnchor.constraint(equalTo: view.centerYAnchor).isActive = true
indicatorView.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
indicatorView.startAnimating()
do {
let task = try userCase?.fetchData(Models<[Info]>.self)
task?.sink(receiveCompletion: { [weak self] completion in
switch completion {
case .finished:
print("finished")
case .failure(let error):
DispatchQueue.main.async {
self?.stateMachine?.state(forClass: ErrorState.self)?.inject(object: error)
self?.stateMachine?.enter(ErrorState.self)
}
}
}, receiveValue: { [weak self] (model) in
DispatchQueue.main.async {
self?.stateMachine?.state(forClass: DataState.self)?.inject(object: model.results)
self?.stateMachine?.enter(DataState.self)
}
}).store(in: &cannel)
} catch {
stateMachine?.state(forClass: ErrorState.self)?.inject(object: error)
stateMachine?.enter(ErrorState.self)
}
}
override func willExit(to nextState: GKState) {
self.indicatorView.stopAnimating()
self.indicatorView.removeFromSuperview()
}
}
<file_sep>/stateMachine/Utility/InjectionHandler.swift
//
// InjectionHandler.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import Foundation
protocol InjectionHandler {
associatedtype Object
func inject(object: Object)
}
<file_sep>/stateMachine/State/DataState.swift
//
// DataState.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import UIKit
import GameplayKit
class DataState: ViewControllerState, InjectionHandler {
private var info: [Info] = []
func inject(object: [Info]) {
self.info = object
}
private lazy var tableView: UITableView = {
let view = UITableView()
view.translatesAutoresizingMaskIntoConstraints = false
view.delegate = self
view.dataSource = self
view.register(UITableViewCell.self, forCellReuseIdentifier: "UITableViewCell")
return view
}()
override func didEnter(from previousState: GKState?) {
view.addSubview(tableView)
tableView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
tableView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
tableView.leadingAnchor.constraint(equalTo: view.leadingAnchor).isActive = true
tableView.trailingAnchor.constraint(equalTo: view.trailingAnchor).isActive = true
tableView.reloadData()
}
override func willExit(to nextState: GKState) {
tableView.removeFromSuperview()
}
}
extension DataState: UITableViewDataSource, UITableViewDelegate {
func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
return info.count
}
func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
let cell = tableView.dequeueReusableCell(withIdentifier: "UITableViewCell", for: indexPath)
cell.textLabel?.text = info[indexPath.row].email
return cell
}
}
<file_sep>/stateMachine/State/ErrorState.swift
//
// ErrorState.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import Foundation
import GameplayKit
class ErrorState: ViewControllerState, InjectionHandler {
private var error: Error?
func inject(object: Error) {
self.error = object
}
override func didEnter(from previousState: GKState?) {
guard let error = error else {
return
}
let alert = UIAlertController(title: "error", message: error.localizedDescription, preferredStyle: .alert)
let okAction = UIAlertAction(title: "ok", style: .default, handler: { [weak self] _ in
self?.stateMachine?.enter(EmptyState.self)
})
alert.addAction(okAction)
viewController.present(alert, animated: true, completion: nil)
}
override func willExit(to nextState: GKState) {
}
}
<file_sep>/stateMachine/Utility/UserCase.swift
//
// UserCase.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import Combine
import Foundation
class UserCase {
private let urlString: String
init(_ urlString: String) {
self.urlString = urlString
}
func fetchData<T: Codable>(_ model: T.Type) throws -> AnyPublisher<T, Error> {
guard let url = URL(string: urlString) else {
throw LoadingState.ErrorCase.missURL
}
return URLSession.shared.dataTaskPublisher(for: url)
.map(\.data)
.mapError { $0 as Error }
.decode(type: T.self, decoder: JSONDecoder())
.eraseToAnyPublisher()
}
}
<file_sep>/stateMachine/ViewController.swift
//
// ViewController.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import UIKit
import GameplayKit
class ViewController: UIViewController {
var stateMachine: GKStateMachine?
override func viewDidLoad() {
super.viewDidLoad()
stateMachine = GKStateMachine(states: [
EmptyState(self),
LoadingState(self, userCase: UserCase("https://randomuser.me/api/?results=5")),
ErrorState(self),
DataState(self)
])
stateMachine?.enter(EmptyState.self)
}
}
class ViewControllerState: GKState {
unowned let viewController: ViewController
var view: UIView { viewController.view }
init(_ viewController: ViewController) {
self.viewController = viewController
viewController.view.backgroundColor = .white
}
}
<file_sep>/stateMachine/State/EmptyState.swift
//
// EmptyState.swift
// stateMachine
//
// Created by <NAME> on 2020/5/15.
// Copyright ยฉ 2020 com.jie.stateMachine. All rights reserved.
//
import UIKit
import GameplayKit
class EmptyState: ViewControllerState {
var emptyView: UIView = {
let view = UIView()
view.backgroundColor = .red
return view
}()
override func didEnter(from previousState: GKState?) {
view.addSubview(emptyView)
emptyView.translatesAutoresizingMaskIntoConstraints = false
emptyView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
emptyView.leadingAnchor.constraint(equalTo: view.leadingAnchor).isActive = true
emptyView.trailingAnchor.constraint(equalTo: view.trailingAnchor).isActive = true
emptyView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
let button = UIButton(type: .system)
emptyView.addSubview(button)
button.translatesAutoresizingMaskIntoConstraints = false
button.centerYAnchor.constraint(equalTo: view.centerYAnchor).isActive = true
button.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
button.addTarget(self, action: #selector(retry), for: .touchUpInside)
button.setTitle("start", for: .normal)
}
override func willExit(to nextState: GKState) {
emptyView.removeFromSuperview()
}
}
extension EmptyState {
@objc func retry() {
stateMachine?.enter(LoadingState.self)
}
}
|
077ee5a51075fcefb61d7f7aca09f28c3ff8a3d9
|
[
"Swift"
] | 8 |
Swift
|
viocax/stateMachine
|
7b0fba250e048bd6dba9e24562e3f33841fd2263
|
a44cf8b4428593fdc258f9f44524145fdd74f657
|
refs/heads/master
|
<file_sep># SnakeGame
This is a simple snake game that works the same as the classic snake game most people have played.
### Running the Game
You can play the game by downloading the build folder and running the main.exe file.
<file_sep>import pygame
import numpy as np
from snake import Snake
from food import Food
# pygame window setup
pygame.init()
WINDOW_WIDTH = 255
WINDOW_HEIGHT = 255
window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Snake Game')
# grid variables
GRID_WIDTH = 20
GRID_HEIGHT = 20
GRID_MARGIN = 5
GRID_LENGTH = 10
grid = []
# Food
food = Food()
# clock
FPS = 4
clock = pygame.time.Clock()
# colors
WHITE = (255,255,255)
BLACK = (0,0,0)
LIGHT_GREY = (211,211,211)
GREEN = (0,255,0)
class DirectionChange:
def __init__(self, row, col, direction):
self.row_ = row
self.col_ = col
self.direction_ = direction
def CreateGrid(player):
for row in range(GRID_LENGTH):
grid.append([])
for col in range(GRID_LENGTH):
grid[row].append(0)
# initialize where snake and food is
for i in range(len(player)):
grid[player[i].row_][player[i].col_] = 1
grid[food.row_][food.col_] = 2
def DisplayBoard(player):
CreateGrid(player)
for row in range(GRID_LENGTH):
for col in range(GRID_LENGTH):
color = BLACK
if grid[row][col] == 1:
color = player[0].color_
elif grid[row][col] == 2:
color = food.color_
pygame.draw.rect(window, color, [(GRID_MARGIN + GRID_WIDTH) * col + GRID_MARGIN, (GRID_MARGIN + GRID_HEIGHT) * row + GRID_MARGIN, GRID_WIDTH, GRID_HEIGHT])
def RunProgram():
# handles direction changes
dir_changes = []
remove_dir = 0
# keep track of the points
points = 0
# the snake
player = []
player.append(Snake(0,0))
run = True
while run:
# get events in the game
for event in pygame.event.get():
# check if exit
if event.type == pygame.QUIT:
run = False
pygame.quit()
# check key presses
if event.type == pygame.KEYDOWN:
# left arrow
if event.key == pygame.K_LEFT:
change = DirectionChange(player[0].row_, player[0].col_, 'left')
player[0].set_direction('left')
dir_changes.append(change)
# right arrow
if event.key == pygame.K_RIGHT:
change = DirectionChange(player[0].row_, player[0].col_, 'right')
player[0].set_direction('right')
dir_changes.append(change)
# down arrow
if event.key == pygame.K_DOWN:
change = DirectionChange(player[0].row_, player[0].col_, 'down')
player[0].set_direction('down')
dir_changes.append(change)
# up arrow
if event.key == pygame.K_UP:
change = DirectionChange(player[0].row_, player[0].col_, 'up')
player[0].set_direction('up')
dir_changes.append(change)
# make background light grey
window.fill(LIGHT_GREY)
# create and display the board
DisplayBoard(player)
# make snake move constantly
for snake_seg in player:
temp_row, temp_col = snake_seg.row_, snake_seg.col_
snake_seg.Move()
# if the snake actually moved
if snake_seg.row_ != temp_row or snake_seg.col_ != temp_col:
grid[temp_row][temp_col] = 0
# after move check if the head of snake hit food
if player[0].row_ == food.row_ and player[0].col_ == food.col_:
points += 1
snake_length = len(player)
new_segment = Snake(0,0)
player[snake_length-1].AddSegment(new_segment)
player.append(new_segment)
food.ChangeLocation()
# after move check if snake ran into itself
if len(player) > 1:
for i in range(1, len(player)):
if player[0].row_ == player[i].row_ and player[0].col_ == player[i].col_:
run = False
# check if segment direction needs to change
for change_class in dir_changes:
for i in range(len(player)):
if player[i].row_ == change_class.row_ and player[i].col_ == change_class.col_:
# change direction of that segment
player[i].set_direction(change_class.direction_)
if i == len(player)-1:
remove_dir += 1
# remove the direction from list of directions
while remove_dir > 0:
dir_changes.pop(0)
remove_dir -= 1
# check if segment direction needs to change
#for snake_seg in player:
# if snake_seg.row_ == row_change and snake_seg.col_ == col_change:
# snake_seg.set_direction(dir_change)
# update the window
pygame.display.flip()
# tick the clock
clock.tick(FPS)
return points
def Menu(is_end, points):
run = True
new_game = False
end_font = pygame.font.Font('freesansbold.ttf', 25)
start_font = pygame.font.Font('freesansbold.ttf', 15)
# start menu text
start_text = start_font.render('Press Space Bar to Start Game!', True, WHITE)
start_text_rect = start_text.get_rect()
start_text_rect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2)
while run:
# get events
for event in pygame.event.get():
# if the x is pressed
if event.type == pygame.QUIT:
run = False
pygame.quit()
# if a new game is created
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
new_game = True
run = False
# make background black
window.fill(BLACK)
# if the game ended
if is_end:
# generate points
end_points = end_font.render('Points: {}'.format(points), True, WHITE)
end_points_rect = end_points.get_rect()
end_points_rect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2 - 50)
# generate play again text
end_text = start_font.render('Press Space Bar to Play Again!', True, WHITE)
end_text_rect = end_text.get_rect()
end_text_rect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2)
# display the points to user
window.blit(end_points, end_points_rect)
window.blit(end_text, end_text_rect)
# if the game hasn't been played yet
else:
# display the start menu
window.blit(start_text, start_text_rect)
# update the window
pygame.display.flip()
if new_game:
points = RunProgram()
Menu(True, points)
def main():
Menu(False, 0)
pygame.quit()
if __name__=='__main__':
main()
<file_sep>from random import randrange
# colors
GREEN = (0,255,0)
class Food:
def __init__(self):
self.row_ = randrange(10)
self.col_ = randrange(10)
self.color_ = GREEN
def ChangeLocation(self):
self.row_ = randrange(10)
self.col_ = randrange(10)<file_sep>
# colors
RED = (255,0,0)
# grid variables
GRID_WIDTH = 20
GRID_HEIGHT = 20
GRID_MARGIN = 5
GRID_LENGTH = 10
class Snake:
def __init__(self, row, col):
self.row_ = row
self.col_ = col
self.color_ = RED
self.direction_ = 'right'
def set_row(self, row):
if row < GRID_LENGTH and row >= 0:
self.row_ = row
def set_col(self, col):
if col < GRID_LENGTH and col >= 0:
self.col_ = col
def set_direction(self, direction):
direction = direction.lower()
directions = ['right', 'left', 'down', 'up']
if direction in directions:
self.direction_ = direction
def Move(self):
if self.direction_ == 'right' and self.col_ < GRID_LENGTH-1:
self.col_ += 1
elif self.direction_ == 'left' and self.col_ > 0:
self.col_ -= 1
elif self.direction_ == 'down' and self.row_ < GRID_LENGTH-1:
self.row_ += 1
elif self.direction_ == 'up' and self.row_ > 0:
self.row_ -= 1
def AddSegment(self, new_snake):
# set direction of segment
new_snake.set_direction(self.direction_)
# set row and col of segment
if self.direction_ == 'right':
new_snake.set_row(self.row_)
new_snake.set_col(self.col_ - 1)
elif self.direction_ == 'left':
new_snake.set_row(self.row_)
new_snake.set_col(self.col_ + 1)
elif self.direction_ == 'down':
new_snake.set_row(self.row_ - 1)
new_snake.set_col(self.col_)
elif self.direction_ == 'up':
new_snake.set_row(self.row_ + 1)
new_snake.set_col(self.col_)
|
618909edf9941a65cf5494559c1a27c824584a65
|
[
"Markdown",
"Python"
] | 4 |
Markdown
|
kylebremont/SnakeGame
|
a1a86c51ee3ab52c875d2d5bca0ce591f97370c1
|
c7bef97b4c5bf41f18ceb3447cd3bba40b811f1c
|
refs/heads/master
|
<file_sep>๏ปฟusing System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace Reducio.Core
{
public class IncidentType
{
public string Id { get; set; }
public string Type { get; set; }
public DateTime DateIdentified { get; set; }
public string Description { get; set; }
public string Resolution { get; set; }
public IncidentType() : this(string.Empty, DateTime.Now, string.Empty, string.Empty) { }
public IncidentType(string type, DateTime dateIdentified, string description, string resolution)
{
this.Type = type;
this.DateIdentified = dateIdentified;
this.Description = description;
this.Resolution = resolution;
}
}
}
<file_sep>๏ปฟusing System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Reducio.Data;
using Reducio.Utils;
using Newtonsoft.Json;
namespace Reducio.Core
{
public class ErrorLoggingController
{
private DateTime unspecifiedIncidentTypeDate;
public ErrorLoggingController()
{
this.unspecifiedIncidentTypeDate = new DateTime(2012, 8, 1);
}
public Incident GetIncident(string id, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"ErrorLoggingController.GetIncident - id can not be null");
return dataController.Get<Incident>(id);
}
/// <summary>
/// Accept a json representation of an incident, deserialize, locate a potential parent incident
/// and related incidents, and save to document storage
/// </summary>
/// <param name="jsonIncident">Incident serialized as Json string</param>
/// <param name="dataController">Document storeage as RavenDataController</param>
/// <returns>Logged Incident Id as string</returns>
public string LogIncident(string jsonIncident, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(jsonIncident) == false,
"ErrorLoggingController.GetIncident - jsonIncident can not be null");
var newIncident = JsonConvert.DeserializeObject<Incident>(jsonIncident);
return LogIncident(newIncident, dataController);
}
/// <summary>
/// Accept an Incident object, locate a potential parent incident
/// and related incidents, and save to document storage
/// </summary>
/// <param name="incident">The new Incident object</param>
/// <param name="dataController">Document storeage as RavenDataController</param>
/// <returns>Logged Incident Id as string</returns>
public string LogIncident(Incident incident, RavenDataController dataController)
{
// When the minimum data is missing catalog this as unserviceable then save
if (incident.CanIncidentBeLogged() == false)
{
incident.Title = "Unspecified error!";
incident.IncidentDateTime = DateTime.Now;
incident.Catalogged = false;
incident.PageName = "No page name!";
var unspecifiedIncidentType = new IncidentType("Unspecified", DateTime.Now, "Error logging did not capture results",
"Gather more data");
incident.IncidentType = unspecifiedIncidentType;
}
// Has this occurred before? Associate to parent / primary occurrence
var parentIncident = FindParent(incident.HashedTitle, dataController);
if (string.IsNullOrEmpty(parentIncident.Id) == false)
{
incident.ParentIncidentId = parentIncident.Id;
incident.RelatedIncidents = parentIncident.RelatedIncidents;
incident.Resolved = parentIncident.Resolved;
incident.Catalogged = parentIncident.Catalogged;
}
dataController.Save<Incident>(incident);
return incident.Id;
}
/// <summary>
/// Delete the incident with the supplied id from document storage
/// </summary>
/// <param name="id">Incident Id as string</param>
/// <param name="dataController">Document storage as RavenDataController</param>
public void DeleteIncident(string id, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"ErrorLoggingController.DeleteIncident - id can not be null");
dataController.Delete<Incident>(id);
}
/// <summary>
/// Given a hash of an incident title, find the very first occurrence of an incident with the
/// same hashed title.
/// </summary>
/// <param name="hashTitle">Title hash as string</param>
/// <param name="repository">Document storage as RavenDataController</param>
/// <returns>On sucess, the parent incident. On failure a new Incident with no Id</returns>
public Incident FindParent(string hashTitle, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(hashTitle) == false, "ErrorLoggingController.FindParent - hashTitle can not be null");
var results = dataController.GetAllWhere<Incident>(x => x.HashedTitle == hashTitle)
.ToList<Incident>();
if ((results == null) || (results.Count == 0))
{
return new Incident();
}
var parent = results.Aggregate((item, comp) => item.IncidentDateTime < comp.IncidentDateTime ? item : comp);
if (parent == null)
{
return new Incident();
}
return parent;
}
public List<Incident> GetParentIncidents(RavenDataController dataController)
{
return dataController.GetAllWhere<Incident>(x => string.IsNullOrEmpty(x.ParentIncidentId));
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Services;
using TestWeb.DomainModel;
using Reducio.Data;
namespace TestWeb.Services
{
/// <summary>
/// Summary description for APoorWebService
/// </summary>
[WebService(Namespace = "http://tempuri.org/")]
[WebServiceBinding(ConformsTo = WsiProfiles.BasicProfile1_1)]
[System.ComponentModel.ToolboxItem(false)]
// To allow this Web Service to be called from script, using ASP.NET AJAX, uncomment the following line.
//[System.Web.Script.Services.ScriptService]
public class APoorWebService : System.Web.Services.WebService
{
[WebMethod]
public string GetEmployee(string id)
{
var employeeController = new EmployeeController();
return employeeController.Get(id);
}
[WebMethod]
public string GetAllEmployees()
{
var employeeController = new EmployeeController();
return employeeController.GetAll();
}
[WebMethod]
public void DeleteEmployee(string id)
{
var employeeController = new EmployeeController();
employeeController.Delete(id);
}
[WebMethod]
public string CreateEmployee(string jsonEmployee)
{
var employeeController = new EmployeeController();
return employeeController.CreateEmployee(jsonEmployee);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Linq.Expressions;
using System.Reflection;
using System.Text.RegularExpressions;
namespace Reducio.Utils
{
/// <summary>
/// General exception class for the <see cref="PredicateConstructor{TArg}"/>.
/// </summary>
public class PredicateConstructorException : Exception {
/// <summary>
/// Create a PredicateConstructorException that contains no message.
/// </summary>
public PredicateConstructorException() {
}
/// <summary>
/// Create a PredicateConstructorException object with a syntax error message, see
/// Message in the base class to get the syntax error back.
/// </summary>
/// <param name="message">Syntax error that was encountered.</param>
public PredicateConstructorException(string message)
: base(message) {
}
}
/// <summary>
/// Expression parser for a super simple grammar, see http://www.willamette.edu/~fruehr/348/lab3.html for more on
/// grammars and parsing.
///
/// Code created by <NAME> - http://scottgarland.com/Post/Display/Dynamically_Creating_LINQ_Expression_Predicates_From_A_String
/// </summary>
/// <typeparam name="TArg">The type of the arg.</typeparam>
public class PredicateConstructor<TArg> {
private static readonly Regex RegexBooleanNot = new Regex(@"^(\!)");
private static readonly Regex RegexBooleanOp = new Regex(@"^(\|\||\&\&|\!)");
private static readonly Regex RegexKeyword = new Regex(@"^(?<value>[A-Za-z0-9_]+)");
private static readonly Regex RegexQoute = new Regex(@"^[""']");
private static readonly Regex RegexQuotedValue = new Regex(@"([""'])(?:(?=(\\?))\2.)*?\1");
private static readonly Regex RegexRelationalOp = new Regex(@"^(==|<>|!=|<=?|>=?)");
private static readonly Regex RegexValue = new Regex(@"^(?<value>[^""',\s\)]+)");
private TokenizerString _filterSource;
/// <summary>
/// Initializes a new instance of the <see cref="PredicateConstructor<TArg>"/> class.
/// </summary>
public PredicateConstructor() {
}
/// <summary>
/// Initializes a new instance of the <see cref="PredicateConstructor<TArg>"/> class.
/// </summary>
/// <param name="expression">The expression.</param>
public PredicateConstructor(string expression) {
Predicate = Compile(expression);
}
/// <summary>
/// Gets the compiled predicate, null if an expression has not been successfully compiled.
/// </summary>
/// <value>The predicate.</value>
public Func<TArg, bool> Predicate { get; private set; }
/// <summary>
/// Compiles the specified expression and creates a predicate that can be accessed by
/// the <see cref="Predicate">Predicate</see> property. Exceptions are thrown for syntax
/// errors and illegal type operations.
/// </summary>
/// <param name="expression">The expression.</param>
/// <returns></returns>
public Func<TArg, bool> Compile(string expression) {
ParameterExpression param = Expression.Parameter(typeof (TArg), "o");
ParsedExpressionNode root = ParseExpression(expression);
Expression condition = ConvertToExpression(param, root);
Expression<Func<TArg, bool>> myLambda = Expression.Lambda<Func<TArg, bool>>(condition, param);
Func<TArg, bool> z = myLambda.Compile();
Predicate = z.Invoke;
return Predicate;
}
public Expression<Func<TArg, bool>> CompileToExpression(string expression)
{
ParameterExpression param = Expression.Parameter(typeof(TArg), "o");
ParsedExpressionNode root = ParseExpression(expression);
Expression condition = ConvertToExpression(param, root);
Expression<Func<TArg, bool>> myLambda = Expression.Lambda<Func<TArg, bool>>(condition, param);
return myLambda;
}
/// <summary>
/// Parses the expression.
/// </summary>
/// <param name="expression">The expression.</param>
/// <returns>Parse tree root</returns>
private ParsedExpressionNode ParseExpression(string expression) {
if (string.IsNullOrEmpty(expression)) {
throw new InvalidOperationException();
}
_filterSource = new TokenizerString(expression);
ParsedExpressionNode expressionTree = ParseExpression();
return expressionTree;
}
/// <summary>
/// Checks for a valid property or field in the type we are building a predicate function for.
/// </summary>
/// <param name="name">The name.</param>
/// <returns>True if the name is a property or field.</returns>
private static bool CheckForValidProperyOrField(string name) {
try {
return GetProperyOrFieldType(name) != null;
}
catch (PredicateConstructorException) {
return false;
}
}
/// <summary>
/// Gets the type of the property or field.
/// </summary>
/// <param name="name">The name.</param>
/// <returns>Type object.</returns>
private static Type GetProperyOrFieldType(string name) {
PropertyInfo pi = (from p in typeof (TArg).GetProperties() where p.Name.Equals(name) select p).FirstOrDefault();
if (pi == null) {
FieldInfo fi = (from p in typeof (TArg).GetFields() where p.Name.Equals(name) select p).FirstOrDefault();
if (fi != null) {
return fi.FieldType;
}
throw new PredicateConstructorException("Can only build expression using Properties or Fields of objects");
}
return pi.PropertyType;
}
/// <summary>
/// Determines whether the name is a field in the TArg type.
/// </summary>
/// <param name="name">The name.</param>
/// <returns>
/// <c>true</c> if name is a field; otherwise, <c>false</c>.
/// </returns>
private static bool IsFieldType(string name) {
return (from p in typeof (TArg).GetFields() where p.Name.Equals(name) select p).FirstOrDefault() != null;
}
/// <summary>
/// Determines whether name is a property in the TArg type.
/// </summary>
/// <param name="name">The name.</param>
/// <returns>
/// <c>true</c> if name is a property; otherwise, <c>false</c>.
/// </returns>
private static bool IsPropertyType(string name) {
return (from p in typeof (TArg).GetProperties() where p.Name.Equals(name) select p).FirstOrDefault() != null;
}
/// <summary>
/// Determines whether type is a Nullable<> type.
/// </summary>
/// <param name="theType">The type.</param>
/// <returns>
/// <c>true</c> if theType is a Nullable<> type; otherwise, <c>false</c>.
/// </returns>
private static bool IsNullableGenericType(Type theType) {
return (theType.IsGenericType && theType.GetGenericTypeDefinition().Equals(typeof (Nullable<>)));
}
/// <summary>
/// Converts the parse tree to an <see cref="Expression" />.
/// </summary>
/// <param name="parameter">The base object Expression.Parameter.</param>
/// <param name="root">The root of the parse tree.</param>
/// <returns>An <see cref="Expression{TDelegate}"/> that can be used to create runtime predicate.</returns>
private static Expression ConvertToExpression(Expression parameter, ParsedExpressionNode root) {
var binaryRoot = root as ParsedExpressionBinaryOperatorNode;
if (binaryRoot != null && binaryRoot.Left is ParsedExpressionNameNode) {
var left = (ParsedExpressionNameNode) binaryRoot.Left;
var right = (ParsedExpressionValueNode) binaryRoot.Right;
var op = (ParsedExpressionRelationalOperatorNode) binaryRoot;
Expression target;
if (IsFieldType(left.Name)) {
target = Expression.Field(parameter, left.Name);
}
else if (IsPropertyType(left.Name)) {
target = Expression.Property(parameter, left.Name);
}
else {
throw new PredicateConstructorException("Can only build expression using Properties or Fields of objects");
}
Type targetType = GetProperyOrFieldType(left.Name);
if (!targetType.IsValueType && !right.Quoted && right.Value.Equals("null")) {
if (op.Operator == ParsedExpressionRelationalOperatorNode.RelationalOperator.EqualsOp) {
return Expression.Equal(target, Expression.Constant(null));
}
if (op.Operator == ParsedExpressionRelationalOperatorNode.RelationalOperator.NotEqualsOp) {
return Expression.NotEqual(target, Expression.Constant(null));
}
throw new PredicateConstructorException("Can only use equality operators when comparing NULL values");
}
Expression val;
if (IsNullableGenericType(targetType)) {
var c = new NullableConverter(targetType);
if (right.Value.Equals("null")) {
val = Expression.Constant(null);
}
else {
object value = c.ConvertFrom(right.Value);
val = Expression.Constant(value);
}
}
else {
TypeConverter converter = TypeDescriptor.GetConverter(targetType);
if (!converter.CanConvertFrom(typeof (string))) {
throw new PredicateConstructorException("Can not convert expression value to property/field type");
}
object value = converter.ConvertFrom(right.Value);
val = Expression.Constant(value);
}
switch (op.Operator) {
case ParsedExpressionRelationalOperatorNode.RelationalOperator.EqualsOp:
return Expression.Equal(target, val);
case ParsedExpressionRelationalOperatorNode.RelationalOperator.NotEqualsOp:
return Expression.NotEqual(target, val);
case ParsedExpressionRelationalOperatorNode.RelationalOperator.LessThanOp:
return Expression.LessThan(target, val);
case ParsedExpressionRelationalOperatorNode.RelationalOperator.LessThanEqualsOp:
return Expression.LessThanOrEqual(target, val);
case ParsedExpressionRelationalOperatorNode.RelationalOperator.GreaterThanOp:
return Expression.GreaterThan(target, val);
case ParsedExpressionRelationalOperatorNode.RelationalOperator.GreaterThanEqualsOp:
return Expression.GreaterThanOrEqual(target, val);
}
}
if (root.GetType() == typeof (ParsedExpressionBooleanOperatorNode)) {
var op = root as ParsedExpressionBooleanOperatorNode;
switch (op.Operator) {
case ParsedExpressionBooleanOperatorNode.BooleanOperator.AndOp:
return Expression.And(ConvertToExpression(parameter, op.Left), ConvertToExpression(parameter, op.Right));
case ParsedExpressionBooleanOperatorNode.BooleanOperator.OrOp:
return Expression.Or(ConvertToExpression(parameter, op.Left), ConvertToExpression(parameter, op.Right));
case ParsedExpressionBooleanOperatorNode.BooleanOperator.NotOp:
return Expression.Not(ConvertToExpression(parameter, op.Left));
case ParsedExpressionBooleanOperatorNode.BooleanOperator.OrElse:
return Expression.OrElse(ConvertToExpression(parameter, op.Left), ConvertToExpression(parameter, op.Right));
case ParsedExpressionBooleanOperatorNode.BooleanOperator.AndAlso:
return Expression.AndAlso(ConvertToExpression(parameter, op.Left), ConvertToExpression(parameter, op.Right));
}
}
throw new PredicateConstructorException("Unknown parse tree structure");
}
/// <summary>
/// Parses the expression.
/// </summary>
/// <returns>Root of the parse tree.</returns>
private ParsedExpressionNode ParseExpression() {
_filterSource.SkipWhiteSpace();
ParsedExpressionNode n = ParseBooleanExpression();
_filterSource.SkipWhiteSpace();
if (_filterSource.TestFor(RegexBooleanOp)) {
string sop = _filterSource.Accept(RegexBooleanOp);
var o = new ParsedExpressionBooleanOperatorNode(sop) {Left = n, Right = ParseExpression()};
return o;
}
return n;
}
/// <summary>
/// Parses the boolean expression.
/// </summary>
/// <returns>The boolean root.</returns>
private ParsedExpressionNode ParseBooleanExpression() {
_filterSource.SkipWhiteSpace();
if (_filterSource.TestFor("(")) {
_filterSource.Accept("(");
ParsedExpressionNode n = ParseExpression();
if (!_filterSource.TestFor(")")) {
throw new PredicateConstructorException("Missing Closing ')'");
}
_filterSource.Accept(")");
return n;
}
if (_filterSource.TestFor(RegexBooleanNot)) {
string bop = _filterSource.Accept(RegexBooleanNot);
var n = new ParsedExpressionBooleanOperatorNode(bop) {Left = ParseBooleanExpression()};
return n;
}
return ParseSimpleQuery();
}
/// <summary>
/// Parses the simple query.
/// </summary>
/// <returns>The simple query root object.</returns>
private ParsedExpressionNode ParseSimpleQuery() {
if (!_filterSource.TestFor(RegexKeyword)) {
throw new PredicateConstructorException("Expected A Keyword");
}
string keyword = _filterSource.Accept(RegexKeyword);
if (!CheckForValidProperyOrField(keyword)) {
throw new PredicateConstructorException("Can only build expression using Properties or Fields of objects [" + keyword + "]");
}
var keywordNode = new ParsedExpressionNameNode(keyword);
if (!_filterSource.TestFor(RegexRelationalOp)) {
throw new PredicateConstructorException("Expected A Relational Operator After Keyword" + "[ " + _filterSource.OriginalString + "]");
}
string sop = _filterSource.Accept(RegexRelationalOp);
ParsedExpressionNode termNode = ParseTerm();
return new ParsedExpressionRelationalOperatorNode(sop) {Left = keywordNode, Right = termNode};
}
/// <summary>
/// Parses the term.
/// </summary>
/// <returns>The parsed term tree root.</returns>
private ParsedExpressionNode ParseTerm() {
return ParseValue();
}
/// <summary>
/// Parses the value. Can handle unquoted and quoted values. If you want <code>null</code> as a comparison
/// value don't use quotes.
/// </summary>
/// <returns>The parse value node.</returns>
private ParsedExpressionValueNode ParseValue() {
if (_filterSource.TestFor(RegexQoute)) {
char quoteCharacter = _filterSource[0];
// There may be a compound predicate - Name = 'x' || Description = 'y'
MatchCollection ms = RegexQuotedValue.Matches(_filterSource.CurrentInput);
// Always truncate from first matched " ' ". Ignore other matches
_filterSource.Accept(_filterSource.CurrentInput.Substring(0, ms[0].Value.Length));
string v = ms[0].Value.Substring(1, ms[0].Value.Length - 2);
var n = new ParsedExpressionValueNode(v) { Quoted = true, QuoteCharacter = quoteCharacter };
_filterSource.SkipWhiteSpace();
return n;
}
if (_filterSource.TestFor(RegexValue)) {
string v = _filterSource.Accept(RegexValue);
var n = new ParsedExpressionValueNode(v);
_filterSource.SkipWhiteSpace();
return n;
}
throw new PredicateConstructorException("Expected A Value In Expression [ " + _filterSource.OriginalString + "]");
}
#region Nested type: ParsedExpressionBinaryOperatorNode
/// <summary>
/// Base class for all operator nodes, including the ! operator node (in that case the right
/// child is always null).
/// </summary>
private class ParsedExpressionBinaryOperatorNode : ParsedExpressionNode {
/// <summary>
/// Gets or sets the left.
/// </summary>
/// <value>The left.</value>
public ParsedExpressionNode Left { get; set; }
/// <summary>
/// Gets or sets the right.
/// </summary>
/// <value>The right.</value>
public ParsedExpressionNode Right { get; set; }
}
#endregion
#region Nested type: ParsedExpressionBooleanOperatorNode
/// <summary>
/// The parent of a boolean expression. Left and right children for <code>and</code> and <code>or</code>
/// operators, left children only for <code>not</code>.
/// </summary>
private class ParsedExpressionBooleanOperatorNode : ParsedExpressionBinaryOperatorNode {
#region BooleanOperator enum
/// <summary>
/// The symbolic constants for the boolean operators.
/// </summary>
public enum BooleanOperator {
AndOp,
AndAlso,
OrOp,
OrElse,
NotOp
}
#endregion
/// <summary>
/// Construct a BooleanOperatorNode from a string representation of the operator.
/// Note: For Lucene, || (OrElse) yields the appropriate results for an Or,
/// && (AndAlso) yields the appropriate results for an And
/// </summary>
/// <param name="booleanOperator">A string boolean operator</param>
public ParsedExpressionBooleanOperatorNode(string booleanOperator) {
booleanOperator = booleanOperator.ToLower().Trim();
if (booleanOperator.Equals("&&")) {
Operator = BooleanOperator.AndAlso;
}
else if (booleanOperator.Equals("&"))
{
Operator = BooleanOperator.AndOp;
}
else if (booleanOperator.Equals("||")) {
Operator = BooleanOperator.OrElse;
}
else if (booleanOperator.Equals("|"))
{
Operator = BooleanOperator.OrOp;
}
else if (booleanOperator.Equals("!")) {
Operator = BooleanOperator.NotOp;
}
}
/// <summary>
/// Gets or sets the operator.
/// </summary>
/// <value>The operator.</value>
public BooleanOperator Operator { get; private set; }
}
#endregion
#region Nested type: ParsedExpressionNameNode
/// <summary>
/// The field or property name of an object that is always the left child of a relational node.
/// </summary>
private class ParsedExpressionNameNode : ParsedExpressionNode {
/// <summary>
/// Initializes a new instance of the <see cref="PredicateConstructor<TArg>.ParsedExpressionNameNode"/> class.
/// </summary>
/// <param name="name">The name.</param>
public ParsedExpressionNameNode(string name) {
Name = name;
}
/// <summary>
/// Gets the field or property name.
/// </summary>
/// <value>The name.</value>
public string Name { get; private set; }
}
#endregion
#region Nested type: ParsedExpressionNode
/// <summary>
/// Base class for all nodes in the generated parse tree.
/// </summary>
private class ParsedExpressionNode {
}
#endregion
#region Nested type: ParsedExpressionRelationalOperatorNode
/// <summary>
/// A parse node that represents a relational operator. Will have two children, relational
/// associativity is not supported.
/// </summary>
private class ParsedExpressionRelationalOperatorNode : ParsedExpressionBinaryOperatorNode {
#region RelationalOperator enum
public enum RelationalOperator {
LessThanOp,
LessThanEqualsOp,
GreaterThanOp,
GreaterThanEqualsOp,
EqualsOp,
NotEqualsOp
}
#endregion
/// <summary>
/// Construct a relational operator node using a string description of the operator.
/// </summary>
/// <param name="relationalOperator">The string version of the operator</param>
public ParsedExpressionRelationalOperatorNode(string relationalOperator) {
relationalOperator = relationalOperator.Trim().ToLower();
if (relationalOperator.Equals("=") || relationalOperator.Equals("==")) {
Operator = RelationalOperator.EqualsOp;
}
else if (relationalOperator.Equals("!=") || relationalOperator.Equals("<>")) {
Operator = RelationalOperator.NotEqualsOp;
}
else if (relationalOperator.Equals("<")) {
Operator = RelationalOperator.LessThanOp;
}
else if (relationalOperator.Equals("<=")) {
Operator = RelationalOperator.LessThanEqualsOp;
}
else if (relationalOperator.Equals(">")) {
Operator = RelationalOperator.GreaterThanOp;
}
else if (relationalOperator.Equals(">=")) {
Operator = RelationalOperator.GreaterThanEqualsOp;
}
}
/// <summary>
/// Gets or sets the operator.
/// </summary>
/// <value>The operator.</value>
public RelationalOperator Operator { get; private set; }
}
#endregion
#region Nested type: ParsedExpressionValueNode
/// <summary>
/// The value side of a relation, in all cases the right child of a relational node.
/// </summary>
private class ParsedExpressionValueNode : ParsedExpressionNode {
/// <summary>
/// Initializes a new instance of the <see cref="PredicateConstructor<TArg>.ParsedExpressionValueNode"/> class.
/// </summary>
/// <param name="value">The value.</param>
public ParsedExpressionValueNode(string value) {
Value = value;
}
/// <summary>
/// Gets or sets the value.
/// </summary>
/// <value>The value.</value>
public string Value { get; private set; }
/// <summary>
/// Gets or sets a value indicating whether this input string was quoted.
/// </summary>
/// <value><c>true</c> if quoted; otherwise, <c>false</c>.</value>
public bool Quoted { get; set; }
/// <summary>
/// Gets or sets the quote character.
/// </summary>
/// <value>The quote character.</value>
public char QuoteCharacter { get; set; }
}
#endregion
#region Nested type: TokenizerString
/// <summary>
/// String wrapper class that provides the parser with a simple lexical tokenizer
/// for a string.
/// </summary>
private class TokenizerString {
private readonly string _originalInput;
private string _input;
/// <summary>
/// Construct a TokenizerString object and pass a string expression. The string
/// parameter is saved and a copy is created that will be used for parsing.
/// </summary>
/// <param name="s">Original string expression</param>
public TokenizerString(string s) {
_originalInput = s;
_input = s.Trim();
}
/// <summary>
/// Gets the <see cref="System.Char"/> at the specified index.
/// </summary>
/// <value></value>
public char this[int index] {
get { return _input[index]; }
}
/// <summary>
/// The original string that was passed to the constructor is saved and
/// can be found using this property.
/// </summary>
public string OriginalString {
get { return _originalInput; }
}
/// <summary>
/// Gets the current input.
/// </summary>
/// <value>The current input.</value>
public string CurrentInput {
get { return _input; }
}
/// <summary>
/// Trim the white space from both the front and rear of the string.
/// </summary>
public void SkipWhiteSpace() {
_input = _input.Trim();
}
/// <summary>
/// Test for a leading string in the query string and return true if found.
/// </summary>
/// <param name="s">Test string</param>
/// <returns>True if the query string starts with s, otherwise false</returns>
public bool TestFor(string s) {
return _input.StartsWith(s);
}
/// <summary>
/// Test for a regular expression in the query string and return true if found. One
/// should be careful to anchor the regular expression such that it doesn't match
/// somewhere other than the start (like the middle of the query string).
/// </summary>
/// <param name="testRegex">A regular expression object to test</param>
/// <returns>True if the regular expression if found and matched, otherwise false</returns>
public bool TestFor(Regex testRegex) {
return testRegex.Match(_input).Success;
}
/// <summary>
/// Test for a string at the start of the query string; if the test is
/// successful the test string is stripped from the front of the query string,
/// otherwise a PredicateConstructorException is thrown.
/// </summary>
/// <param name="acceptString">Testing string</param>
public void Accept(string acceptString) {
if (TestFor(acceptString)) {
_input = _input.Substring(acceptString.Length);
SkipWhiteSpace();
return;
}
throw new PredicateConstructorException();
}
/// <summary>
/// Accepts input characters based upon the specified regex.
/// </summary>
/// <param name="acceptRegex">The accept regex.</param>
/// <returns>The matched string or null</returns>
public string Accept(Regex acceptRegex) {
Match m = acceptRegex.Match(_input);
if (m.Success) {
_input = _input.Substring(m.Value.Length);
SkipWhiteSpace();
Group gv = m.Groups["value"];
if (gv == null || gv.Length == 0) {
return m.Value;
}
return gv.Value;
}
return null;
}
/// <summary>
/// Return the current query string.
/// </summary>
/// <returns>The current query string.</returns>
public override string ToString() {
return _input;
}
}
#endregion
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Raven.Client.Document;
using Reducio.Data;
using Reducio.Utils;
using Newtonsoft.Json;
namespace Reducio.Core
{
public class IncidentTypeController
{
#region Members
#endregion
#region Properties
#endregion
#region CTOR
public IncidentTypeController(){}
#endregion
#region Methods
/// <summary>
/// Save an IncidentType to document store
/// </summary>
/// <param name="jsonIncidentType">Serilized IncidentType as Json string</param>
/// <param name="repository">Document Storage as RavenDataController</param>
/// <returns>Id of saved IncidentType as string</returns>
public string CreateIncidentType(string jsonIncidentType, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(jsonIncidentType) == false,
"ErrorLoggingConntroller.CreateIncidentType - jsonIncidentType can not be null");
var incidentType = JsonConvert.DeserializeObject<IncidentType>(jsonIncidentType);
dataController.Save<IncidentType>(incidentType);
return incidentType.Id;
}
/// <summary>
/// Fetch an IncidentType for a Id
/// </summary>
/// <param name="id">Id as string</param>
/// <param name="dataController">Document storeage as RavenDataController</param>
/// <returns>The IncidentType, Id == null on failure</returns>
public IncidentType GetIncidentType(string id, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"IncidentTypeController.GetIncidentType - id can not be null");
return dataController.Get<IncidentType>(id);
}
/// <summary>
/// Fetch all IncidentTypes
/// </summary>
/// <param name="dataController">Document storeage as RavenDataController</param>
/// <returns></returns>
public List<IncidentType> GetAllIncidentTypes(RavenDataController dataController)
{
return dataController.GetAll<IncidentType>();
}
/// <summary>
/// Delete an IncidentType with the supplied id
/// </summary>
/// <param name="id">Id as string</param>
/// <param name="dataController">Document storeage as RavenDataController</param>
public void DeleteIncidentType(string id, RavenDataController dataController)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"IncidentTypeController.DeleteIncidentType - id can not be null");
dataController.Delete<IncidentType>(id);
}
/// <summary>
/// When an update is performed against an IncidentType,
/// propagate changes to all incidents with this type
/// </summary>
public void CascadeUpdateForIncidentType(IncidentType incidentType,
RavenDataController dataController)
{
}
/// <summary>
/// Remove the parent association from an incident
/// </summary>
public void BreakParentRelationship(string parentIncidentId, string incidentId,
RavenDataController dataController)
{
}
/// <summary>
/// A change to an IncidentType.Type on a parent propagates to all of the
/// child Incidents. That is, switching IncidentType = database to IncidentType
/// = javascript updates all Incidents
/// </summary>
/// <param name="id"></param>
/// <param name="incidentTypeId"></param>
public void CascadeParentIncidentType(string id, string incidentTypeId,
RavenDataController dataController)
{
}
/// <summary>
/// Fetch distribution of IncidentTypes: count of each IncidentType
/// </summary>
/// <param name="dataController"></param>
/// <returns>IncidentType.Type and Count as Json string</returns>
public string GetIncidentTypeDistribution(RavenDataController dataController)
{
var distribution = dataController.GetAll<DistinctIncidentTypeIndex
.DistinctIncidentType>("DistinctIncidentTypeIndex");
return JsonConvert.SerializeObject(distribution);
}
#endregion
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Reducio.Data;
using Reducio.Core;
using NUnit.Framework;
using Raven.Client.Document;
using Newtonsoft.Json;
namespace TestSuite
{
[TestFixture]
public class ErrorLoggingTests
{
/// <summary>
/// Should be able to create an incident and store in Incident document
/// </summary>
[Test]
[Category("ErrorLogging")]
public void CanLogIncident()
{
// Prep
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
int postUpdateCount = 0;
var newIncident = CreateTransitionError();
var repository = new RavenDataController(docStore);
var errorLoggingController = new ErrorLoggingController();
string json = JsonConvert.SerializeObject(newIncident);
errorLoggingController.LogIncident(json, repository);
using (var session = docStore.OpenSession())
{
postUpdateCount = session.Query<Incident>()
.Customize(x => x.WaitForNonStaleResults())
.Count();
}
Assert.AreEqual(1, postUpdateCount);
}
/// <summary>
/// Should be able to find a previous incident with the same hash title
/// </summary>
[Test]
[Category("ErrorLogging")]
public void CanFindParentIncident()
{
// Prep
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var parentIncident = CreateTransitionError();
parentIncident.IncidentType = new IncidentType("ParentTypeError", new DateTime(2012, 1, 31),
"The Parent Type", "Truncate database");
var repository = new RavenDataController(docStore);
repository.Save<Incident>(parentIncident);
// 2 more incidents for our searching
var incident2 = CreateTransitionError();
incident2.PageName = "Incident 2";
repository.Save<Incident>(incident2);
var incident3 = CreateTransitionError();
incident3.PageName = "Incident 3";
repository.Save<Incident>(incident3);
// Test
var errorLoggingController = new ErrorLoggingController();
var foundParent = errorLoggingController.FindParent(parentIncident.HashedTitle, repository);
Console.WriteLine(parentIncident.Id);
Assert.AreEqual(parentIncident.IncidentType.Type, foundParent.IncidentType.Type);
}
/// <summary>
/// Should be able to associate a new incident with the first occurrence
/// of that incident. That is, when an incident has been logged, find it and save the
/// id as ParentId with the secondary incident
/// </summary>
[Test]
[Category("ErrorLogging")]
public void CanLogIncidentAndAssociateToParent()
{
// Prep
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
// Parent
var parentIncident = CreateTransitionError();
parentIncident.IncidentType = new IncidentType("ParentTypeError", new DateTime(2012, 1, 31),
"The Parent Type", "Truncate database");
var repository = new RavenDataController(docStore);
repository.Save<Incident>(parentIncident);
// 2 more incidents for our searching
var incident2 = CreateTransitionError();
incident2.PageName = "Incident 2";
repository.Save<Incident>(incident2);
var incident3 = CreateTransitionError();
incident3.PageName = "Incident 3";
repository.Save<Incident>(incident3);
//The child incident
var childincident = CreateTransitionError();
var errorController = new ErrorLoggingController();
string childId = errorController.LogIncident(JsonConvert.SerializeObject(childincident), repository);
var savedChild = repository.Get<Incident>(childId);
Console.WriteLine("Child Parent Id " + savedChild.ParentIncidentId + " : Parent Id " + parentIncident.Id);
Assert.AreEqual(parentIncident.Id, savedChild.ParentIncidentId);
}
/// <summary>
/// Should be able to delete an incident
/// </summary>
[Test]
[Category("ErrorLogging")]
public void CanDeleteIncident()
{
// Prep
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var repository = new RavenDataController(docStore);
var incident = CreateTransitionError();
incident.PageName = "FindAnDeleteMe";
var errorLoggingController = new ErrorLoggingController();
string id = errorLoggingController
.LogIncident(JsonConvert.SerializeObject(incident), repository);
int postLogCount = 0;
using (var session = docStore.OpenSession())
{
postLogCount = session.Query<Incident>()
.Customize(x => x.WaitForNonStaleResults())
.Count();
}
// Test
errorLoggingController.DeleteIncident(id, repository);
int postDeleteCount = 0;
using (var session = docStore.OpenSession())
{
postDeleteCount = session.Query<Incident>()
.Customize(x => x.WaitForNonStaleResults())
.Count();
}
Assert.AreEqual(postLogCount, postDeleteCount + 1);
}
/// <summary>
/// Should be able to get all incidents
/// </summary>
[Test]
[Category("ErrorLogging")]
public void CanFetchAllIncidents()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIndicents = Create15Incidents();
var dataController = new RavenDataController(docStore);
batchIndicents.ForEach(x => dataController.Save<Incident>(x));
int totalCount = 0;
using(var session = docStore.OpenSession())
{
totalCount = session.Query<Incident>()
.Customize(x => x.WaitForNonStaleResults())
.Count();
}
// Test
var allIncidents = dataController.GetAll<Incident>();
Assert.AreEqual(totalCount, allIncidents.Count);
}
/// <summary>
/// Should be able to fetch only the incidents where IncidentType.Type == Javascript
/// </summary>
[Test]
[Category("ErrorLoggingQuery")]
public void CanQueryForJavascriptIncidentType()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIndicents = Create15Incidents();
var dataController = new RavenDataController(docStore);
batchIndicents.ForEach(x => dataController.Save<Incident>(x));
int totalJavascriptCount = 0;
using(var session = docStore.OpenSession())
{
totalJavascriptCount = session.Query<Incident>()
.Customize(x => x.WaitForNonStaleResults())
.Where(x => x.IncidentType.Type == "javascript")
.Count();
}
// Test
var javascriptIncidents = dataController.GetAllWhere<Incident>(inc => inc.IncidentType.Type == "javascript");
Assert.AreEqual(totalJavascriptCount, javascriptIncidents.Count);
}
[Test]
[Category("ErrorLoggingQuery")]
public void CanGetDistincIncidentTypeByIndex()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIndicents = Create15Incidents();
var dataController = new RavenDataController(docStore);
batchIndicents.ForEach(x => dataController.Save<Incident>(x));
Raven.Client.Indexes.IndexCreation.CreateIndexes(typeof(DistinctIncidentTypeIndex).Assembly, docStore);
var distinctIncidentTypes = dataController
.GetAll<DistinctIncidentTypeIndex.DistinctIncidentType>
("DistinctIncidentTypeIndex");
Assert.AreEqual(4, distinctIncidentTypes.Count);
}
[Test]
[Category("ErrorLoggingQuery")]
public void CanQueryIndexForJavascriptIncidentType()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIndicents = Create15Incidents();
var dataController = new RavenDataController(docStore);
batchIndicents.ForEach(x => dataController.Save<Incident>(x));
Raven.Client.Indexes.IndexCreation.CreateIndexes(typeof(DistinctIncidentTypeIndex).Assembly, docStore);
var javascriptIncidentType = dataController.GetAllWhere<DistinctIncidentTypeIndex.DistinctIncidentType>
(x => x.Name == "javascript",
"DistinctIncidentTypeIndex");
Assert.AreEqual(5, javascriptIncidentType[0].Count);
}
[Test]
[Category("ErrorLoggingQuery")]
public void CanQueryIndexForParentIncidents()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIncidents = Create15Incidents();
var dataController = new RavenDataController(docStore);
batchIncidents.ForEach(x => dataController.Save<Incident>(x));
var parentIncidents = dataController.GetAll<Incident>("ParentIncidentIndex");
Assert.AreEqual(2, parentIncidents.Count);
}
[Test]
[Category("ErrorLoggingQuery")]
public void CanQueryIndexForUnresolvedIncidents()
{
var docStore = GetDocStore();
TruncateIncidentDocuments(docStore);
var batchIncidents = Create15Incidents();
// Mark 5 of these as resolved
for (int i = 0; i <= 4; i++ )
{
batchIncidents[i].Resolved = true;
}
var dataController = new RavenDataController(docStore);
batchIncidents.ForEach(x => dataController.Save<Incident>(x));
Raven.Client.Indexes.IndexCreation.CreateIndexes(typeof(UnresolvedIncidentIndex).Assembly, docStore);
var unresolved = dataController.GetAll<Incident>("UnresolvedIncidentIndex");
Assert.AreEqual(10, unresolved.Count);
}
[TestFixtureSetUp]
public void CreateIncidentTypes()
{
var docStore = GetDocStore();
TruncateIncidentTypes(docStore);
var dataController = new RavenDataController(docStore);
var databaseType = new IncidentType("Database", new DateTime(2012, 3, 4), "Database offline", "reboot");
dataController.Save<IncidentType>(databaseType);
var javascriptType = new IncidentType("Javascript", new DateTime(2012, 3, 4), "jQuery not loaded", "review script");
dataController.Save<IncidentType>(javascriptType);
var nullObjectType = new IncidentType("Null Object Reference", new DateTime(2012, 3, 4), "Not record returned", "check database");
dataController.Save<IncidentType>(nullObjectType);
var pageNotFound = new IncidentType("Page Not Found", new DateTime(2012, 3, 4), "404", "check IIS");
dataController.Save<IncidentType>(pageNotFound);
}
#region Helper Methods
private Incident CreateTransitionError()
{
var newIncident = new Incident();
newIncident.PageName = "Transitions.aspx";
newIncident.IncidentDateTime = DateTime.Now;
newIncident.CurrentDOM = "Here is the DOM stuff";
newIncident.Title = "NUnit testing generated error";
return newIncident;
}
private List<Incident> Create15Incidents()
{
var incidents = new List<Incident>();
// 4 Database IncidentTypes
var databaseType = new IncidentType("Database", new DateTime(2012, 3, 4), "Database offline", "reboot");
int i;
for (i = 0; i <= 3; i++)
{
var incident = CreateTransitionError();
incident.IncidentType = databaseType;
incidents.Add(incident);
}
// 6 Javascript
var javascriptType = new IncidentType("Javascript", new DateTime(2012, 3, 4), "jQuery not loaded", "review script");
for (i = 0; i <= 5; i++)
{
var incident = CreateTransitionError();
incident.IncidentType = javascriptType;
incident.Title = incident.Title + " - " + i.ToString();
incidents.Add(incident);
}
// 2 Null Object Reference
var nullObjectType = new IncidentType("Null Object Reference", new DateTime(2012, 3, 4), "Not record returned", "check database");
for (i = 0; i <= 1; i++)
{
var incident = CreateTransitionError();
incident.IncidentType = nullObjectType;
incident.PageName = incident.PageName + " - " + i.ToString();
incidents.Add(incident);
}
// 3 Page Not Found
var pageNotFound = new IncidentType("Page Not Found", new DateTime(2012, 3, 4), "404", "check IIS");
for (i = 0; i <= 2; i++)
{
var incident = CreateTransitionError();
incident.IncidentType = pageNotFound;
incident.PageName = incident.PageName + " - " + i.ToString() + " " + incident.Title;
incidents.Add(incident);
}
return incidents;
}
private DocumentStore GetDocStore()
{
var docStore = new DocumentStore() { Url = "http://localhost:8080" };
docStore.Initialize();
return docStore;
}
private void TruncateIncidentDocuments(DocumentStore docStore)
{
using (var session = docStore.OpenSession())
{
var documents = session.Query<Incident>().ToList();
documents.ForEach(doc => session.Delete<Incident>(doc));
session.SaveChanges();
}
}
private void TruncateIncidentTypes(DocumentStore docStore)
{
using (var session = docStore.OpenSession())
{
var documents = session.Query<IncidentType>().ToList();
documents.ForEach(doc => session.Delete<IncidentType>(doc));
session.SaveChanges();
}
}
#endregion
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Security;
using System.Web.SessionState;
using Raven.Client;
using Raven.Client.Indexes;
using Reducio.Core;
using Reducio.Data;
using System.Configuration;
using Raven.Client.Document;
namespace TestWeb
{
public class Global : System.Web.HttpApplication
{
void Application_Start(object sender, EventArgs e)
{
// Connect to RavenDB
string connection = ConfigurationManager.ConnectionStrings["RavenDBLocal"].ConnectionString;
DocumentStore documentStore = new DocumentStore() { Url = connection };
documentStore.Initialize();
var dataController = new RavenDataController(documentStore);
Application["RavenDataController"] = dataController;
// Create Indexes
CreateReducioIndexes(documentStore);
}
void Application_End(object sender, EventArgs e)
{
// Code that runs on application shutdown
}
void Application_Error(object sender, EventArgs e)
{
// A server side exception was not handled. Create an incident
var errorController = new ErrorLoggingController();
var incident = new Incident();
incident.OriginalErrorMessage = Server.GetLastError().Message;
errorController.LogIncident(incident, Application["RavenDataController"] as RavenDataController);
}
void Session_Start(object sender, EventArgs e)
{
// Code that runs when a new session is started
Session["UserSessionGuid"] = System.Guid.NewGuid().ToString();
}
void Session_End(object sender, EventArgs e)
{
// Code that runs when a session ends.
// Note: The Session_End event is raised only when the sessionstate mode
// is set to InProc in the Web.config file. If session mode is set to StateServer
// or SQLServer, the event is not raised.
}
private void CreateReducioIndexes(DocumentStore documentStore)
{
Raven.Client.Indexes.IndexCreation.CreateIndexes(typeof(UnresolvedIncidentIndex).Assembly, documentStore);
Raven.Client.Indexes.IndexCreation.CreateIndexes(typeof(DistinctIncidentTypeIndex).Assembly, documentStore);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Raven.Client.Indexes;
using Raven.Abstractions.Indexing;
namespace Reducio.Core
{
public class UncataloggedIncidentIndex : AbstractIndexCreationTask<Incident>
{
public UncataloggedIncidentIndex()
{
Map = docs => from doc in docs
where doc.Catalogged == false
select new { doc.Title, doc.Id };
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Raven.Client.Indexes;
using Raven.Abstractions.Indexing;
namespace Reducio.Core
{
public class UnresolvedIncidentIndex : AbstractIndexCreationTask<Incident>
{
public UnresolvedIncidentIndex()
{
Map = docs => from doc in docs
where doc.Resolved == false
select new { Title = doc.Title, Id = doc.Id };
Index("Title", FieldIndexing.Analyzed);
Index("Id", FieldIndexing.Analyzed);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Services;
using Reducio.Core;
using Reducio.Data;
using Reducio.Utils;
namespace TestWeb.Services
{
/// <summary>
/// Summary description for ReducioData
/// </summary>
[WebService(Namespace = "http://tempuri.org/")]
[WebServiceBinding(ConformsTo = WsiProfiles.BasicProfile1_1)]
[System.ComponentModel.ToolboxItem(false)]
// To allow this Web Service to be called from script, using ASP.NET AJAX, uncomment the following line.
[System.Web.Script.Services.ScriptService]
public class ReducioData : System.Web.Services.WebService
{
[WebMethod]
public void LogError(string jsonError)
{
Enforce.That(string.IsNullOrEmpty(jsonError) == false,
"ErrorLogging.LogError - jsonError can not be null");
var errorLoggingController = new ErrorLoggingController();
errorLoggingController.LogIncident(jsonError, Application["RavenDataController"] as RavenDataController);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Diagnostics;
using System.Web;
namespace Reducio.Utils
{
public static class Enforce
{
public static T ArgumentNotNull<T>(T argument, string description)
where T : class
{
if (argument == null)
throw new ArgumentNullException(description);
return argument;
}
public static T ArgumentGreaterThanZero<T>(T argument, string description)
{
if (System.Convert.ToInt32(argument) < 1)
throw new ArgumentOutOfRangeException(description);
return argument;
}
public static DateTime ArgumentDateIsInitialized(DateTime argument, string description)
{
if (argument == DateTime.MinValue)
throw new ArgumentException("DateTime has not been initialized");
return argument;
}
public static Dictionary<T, K> ContainsKey<T,K>(Dictionary<T, K>search,
T key,
string description)
{
if(search.ContainsKey(key) == false)
{
throw new KeyNotFoundException(description);
}
return search;
}
public static void That(bool condition, string message)
{
if (condition == false)
{
throw new ArgumentException(message);
}
}
public static void That(bool condition, string message, List<string> errorList)
{
if (condition == false)
{
errorList.Add(message);
}
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using Newtonsoft.Json;
namespace TestWeb.DomainModel
{
public class EmployeeController
{
private List<Employee> employees;
public EmployeeController()
{
this.employees = new List<Employee>();
// Create 5 employees for testing
this.employees.Add(new Employee()
{
FirstName = "Elvis",
LastName = "Presley",
DepartmentId = 13,
StartDate = new DateTime(2012, 3, 4),
Id = "2"
});
this.employees.Add(new Employee()
{
FirstName = "James",
LastName = "Kirk",
DepartmentId = 1701,
StartDate = new DateTime(1966, 3, 4),
Id = "3"
});
this.employees.Add(new Employee()
{
FirstName = "Dolly",
LastName = "Parton",
DepartmentId = 44,
StartDate = new DateTime(2001, 12, 12),
Id = "4"
});
this.employees.Add(new Employee()
{
FirstName = "David",
LastName = "Coverdale",
DepartmentId = 13,
StartDate = new DateTime(184, 3, 22),
Id = "5"
});
this.employees.Add(new Employee()
{
FirstName = "<NAME>.",
LastName = "Riley",
DepartmentId = 7,
StartDate = new DateTime(1997, 8, 14)
});
}
public string GetAll()
{
return JsonConvert.SerializeObject(this.employees);
}
public string CreateEmployee(string jsonEmployee)
{
var employee = JsonConvert.DeserializeObject<Employee>(jsonEmployee);
employee.Id = (this.employees.Count + 1).ToString();
employees.Add(employee);
return employee.Id;
}
public string Get(string id)
{
var employee = this.employees.FirstOrDefault(x => x.Id == id) ?? new Employee();
return JsonConvert.SerializeObject(employee);
}
public void Delete(string id)
{
this.employees.Remove(this.employees.Find(x => x.Id == id));
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Raven.Client.Indexes;
namespace Reducio.Core
{
public class DistinctIncidentTypeIndex : AbstractIndexCreationTask<Incident, DistinctIncidentTypeIndex.DistinctIncidentType>
{
public class DistinctIncidentType
{
public string Name { get; set; }
public int Count { get; set; }
}
public DistinctIncidentTypeIndex()
{
Map = docs => from doc in docs
select new
{
Name = doc.IncidentType.Type.ToLower(),
Count = 1
};
Reduce = results => from result in results
group result by result.Name into g
select new
{
Name = g.Key,
Count = g.Sum(x => x.Count)
};
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
namespace TestWeb
{
public partial class ABadWebPage : System.Web.UI.Page
{
public string UserSessionGuid { get; set; }
protected void Page_Load(object sender, EventArgs e)
{
this.UserSessionGuid = Session["UserSessionGuid"].ToString();
}
}
}<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Security.Cryptography;
using Reducio.Utils;
using Janga.Validation;
namespace Reducio.Core
{
public class Incident
{
public string Id { get; set; }
public string ParentIncidentId { get; set; }
public string PageName { get; set; }
private string title;
public string Title
{
get
{
if (string.IsNullOrEmpty(this.title))
{
return string.Empty;
}
else
{
return this.title;
}
}
set
{
this.title = value;
this.HashTitle();
}
}
public string HashedTitle { get; set; }
public DateTime IncidentDateTime { get; set; }
public string OriginalErrorMessage { get; set; }
public string CurrentDOM { get; set; }
public string Notes { get; set; }
public string ClientData { get; set; }
public IncidentType IncidentType { get; set; }
public List<string> RelatedIncidents { get; set; }
public bool Resolved { get; set; }
public bool Catalogged { get; set; }
public string UserSessionGuid { get; set; }
public Incident()
: this(string.Empty, string.Empty, string.Empty, DateTime.Now, string.Empty,
string.Empty, string.Empty, new IncidentType(),
new List<string>(), false, false, string.Empty, string.Empty) { }
public Incident(string parentIncidentId, string pageName, string title, DateTime incidentDateTime,
string currentDOM, string originalErrorMessage, string notes,
IncidentType incidentType, List<string> relatedIncidents,
bool resolved, bool catalogged, string userSessionGuid, string clientData)
{
this.PageName = pageName;
this.ParentIncidentId = parentIncidentId;
this.Title = title;
this.IncidentDateTime = incidentDateTime;
this.OriginalErrorMessage = originalErrorMessage;
this.CurrentDOM = currentDOM;
this.Notes = notes;
this.IncidentType = incidentType;
this.RelatedIncidents = relatedIncidents;
this.Resolved = resolved;
this.Catalogged = catalogged;
this.UserSessionGuid = userSessionGuid;
this.ClientData = clientData;
if (this.Title.Length > 0)
{
HashTitle();
}
}
/// <summary>
/// Ensure the minimum items for logging are enforced. Need IncidentDateTime, CurrentDOM, PageName
/// </summary>
/// <returns>True when valid</returns>
public bool CanIncidentBeLogged()
{
return this.Enforce<Incident>("Incident", true)
.When("Title", Compare.NotEqual, string.Empty)
.When("HashedTitle", Compare.NotEqual, string.Empty)
.When("IncidentDateTime", Compare.NotEqual, DateTime.MinValue)
.When("CurrentDOM", Compare.NotEqual, string.Empty)
.When("PageName", Compare.NotEqual, string.Empty)
.IsValid;
}
/// <summary>
/// Taken from Microsoft samples. Create a hash of the title with MD5 crypto
/// </summary>
private void HashTitle()
{
byte[] hashedBytes;
byte[] titleAsBytes = ASCIIEncoding.ASCII.GetBytes(this.Title.ToLower());
hashedBytes = new MD5CryptoServiceProvider().ComputeHash(titleAsBytes);
this.HashedTitle = ByteArrayToString(hashedBytes);
}
/// <summary>
/// Given an array of bytes return as string
/// </summary>
/// <returns></returns>
private string ByteArrayToString(byte[] byteArray)
{
int i;
StringBuilder output = new StringBuilder(byteArray.Length);
for (i = 0; i < byteArray.Length; i++)
{
output.Append(byteArray[i].ToString("X2"));
}
return output.ToString();
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Raven.Client.Document;
using Raven.Client.Authorization;
using Reducio.Utils;
namespace Reducio.Data
{
public class RavenDataController
{
#region Members
private DocumentStore documentStore;
#endregion
#region Properties
public string UserId { get; set; }
public string Operation { get; set; }
#endregion
#region CTOR
public RavenDataController(DocumentStore documentStore)
{
this.documentStore = documentStore;
this.UserId = string.Empty;
this.Operation = string.Empty;
}
public RavenDataController(DocumentStore docStore, string userId, string operation)
{
this.documentStore = docStore;
this.UserId = userId;
this.Operation = operation;
}
#endregion
#region Methods
public T Get<T>(string id)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"RavenDataController.Get - id string can not be null");
using (var session = this.documentStore.OpenSession())
{
return session.Load<T>(id);
}
}
public void Delete<T>(string id)
{
Enforce.That(string.IsNullOrEmpty(id) == false,
"RavenDataController.Delete - id string can not be null");
using (var session = this.documentStore.OpenSession())
{
var deleteItem = session.Load<T>(id);
session.Delete<T>(deleteItem);
session.SaveChanges();
}
}
public void Save<T>(T saveItem)
{
using (var session = this.documentStore.OpenSession())
{
session.Store(saveItem);
session.SaveChanges();
}
}
public List<T> GetAll<T>()
{
using (var session = this.documentStore.OpenSession())
{
var items = session.Query<T>()
.Customize(x => x.WaitForNonStaleResults())
.ToList();
return items;
}
}
public List<T> GetAll<T>(string indexName)
{
Enforce.That(string.IsNullOrEmpty(indexName) == false,
"RavenDataController.GetAllWhere - indexName string can not be null");
using (var session = this.documentStore.OpenSession())
{
var items = session.Query<T>(indexName)
.Customize(x => x.WaitForNonStaleResults())
.ToList();
return items;
}
}
public List<T> GetAllWhere<T>(string predicateString, string indexName)
{
Enforce.That(string.IsNullOrEmpty(predicateString) == false,
"RavenDataController.GetAllWhere - predicateString can not be null");
Enforce.That(string.IsNullOrEmpty(indexName) == false,
"RavenDataController.GetAllWhere - indexName string can not be null");
using (var session = this.documentStore.OpenSession())
{
var predicate = new PredicateConstructor<T>();
if(string.IsNullOrEmpty(indexName))
{
return GetAllWhere(predicate.CompileToExpression(predicateString));
}
else
{
return GetAllWhere(predicate.CompileToExpression(predicateString), indexName);
}
}
}
public List<T> GetAllWhere<T>(System.Linq.Expressions.Expression<Func<T, bool>> predicate, string indexName)
{
Enforce.That(string.IsNullOrEmpty(indexName) == false,
"RavenDataController.GetAllWhere - indexName string can not be null");
using (var session = this.documentStore.OpenSession())
{
var items = session.Query<T>(indexName)
.Customize(x => x.WaitForNonStaleResults())
.Where<T>(predicate)
.ToList();
return items;
}
}
public List<T> GetAllWhere<T>(System.Linq.Expressions.Expression<Func<T, bool>> predicate)
{
using (var session = this.documentStore.OpenSession())
{
var items = session.Query<T>()
.Customize(x => x.WaitForNonStaleResults())
.Where<T>(predicate)
.ToList();
return items;
}
}
public void TruncateDocuments<T>()
{
using (var session = this.documentStore.OpenSession())
{
var documents = session.Query<T>().ToList();
documents.ForEach(doc => session.Delete<T>(doc));
session.SaveChanges();
}
}
public RavenDataController SecureForOperation(string userId, string operation)
{
this.UserId = userId;
this.Operation = operation;
return SecureForOperation();
}
public RavenDataController SecureForOperation()
{
using (var session = this.documentStore.OpenSession())
{
session.SecureFor(this.UserId, this.Operation);
session.SaveChanges();
return this;
}
}
#endregion
}
}
<file_sep>Reducio - A web application error logging framework for client and server logging
=================================================================================
###Premise
Reducio will help record Javascript errors that occur during client side testing, and will attempt to:
* Classify each error based on message thrown
* Record an exception from the client and associate secondary instances of that
exception to the primary error description
* Assign an identifier to the user session for error correlation
* Provide a method for mapping client side errors to any exceptions thrown in the server environment
Just add this script block to your web pages and errors will be logged on your server:
```javascript
<script type="text/javascript">
var userSessionGuid = "<%= UserSessionGuid%>";
</script>
```
And add this function to your .js file
```javascript
/*
Error Handler Object
*/
var Incident = function (title, pageName, userSessionGuid, currentDOM, notes, clientData, originalErrorMessage) {
this.Title = title;
this.PageName = pageName;
this.UserSessionGuid = userSessionGuid;
this.CurrentDOM = currentDOM;
this.Notes = notes;
this.ClientData = clientData;
this.OriginalErrorMessage = originalErrorMessage;
}
function sendError(){
var o, xhr, data, msg = {}, argtype = typeof( arguments[0] );
// if it is an error object, just use it.
if( argtype === 'object' ){
msg = arguments[0];
}
// if it is a string, check whether we have 3 arguments...
else if( argtype === 'string') {
// if we have 3 arguments, assume this is an onerror event.
if( arguments.length == 3 ){
msg.message = arguments[0];
msg.fileName = arguments[1];
msg.lineNumber = arguments[2];
}
// otherwise, post the first argument
else {
msg.message = arguments[0];
}
}
// include the user agent
msg.userAgent = navigator.userAgent;
// client data - workflows, form Fields, etc.
var clientData = {};
clientData.Workflow = ko.toJSON(vm.selectedWorkflow());
clientData.FormFields = ko.toJSON(vm.formFields());
// Create incident for logging
var incident = new Incident("", document.URL, userSessionGuid, "{body: " + document.body.innerHTML +
"}", "", ko.toJSON(clientData), ko.toJSON(msg));
// Parse Title from message, remove 'Error: '
var endTitlePos = msg.message.indexOf(".");
incident.Title = msg.message.substr(7, endTitlePos - 7);
// convert to JSON string
data = {"jsonError" : ko.toJSON(incident) };
// If jQuery can't load we are in worse shape than what we think.
$.ajax({
url: "Services/ErrorLogging.asmx/LogError",
async: true,
type: "POST",
data: ko.toJSON(data),
contentType: "application/json; charset=utf-8",
dataType: "json",
error: function (XMLHttpRequest, textStatus, errorThrown) {
alert(XMLHttpRequest.status);
alert(XMLHttpRequest.responseText);
},
success: function (msg) {
}
});
// hide error message from user in supporting browsers
return false;
}
window.onerror = sendError;
```
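The snippet above posts the serialized incident to `Services/ErrorLogging.asmx/LogError`; that server-side endpoint is not shown here. The following is only a rough sketch, assuming an ASMX script service with a `jsonError` string parameter — the server-side `Incident` type and the `IncidentLogger.Log` call are hypothetical placeholders for the real logging pipeline:
```csharp
// Hypothetical sketch of Services/ErrorLogging.asmx - the real implementation may differ.
[System.Web.Script.Services.ScriptService]
public class ErrorLogging : System.Web.Services.WebService
{
    [System.Web.Services.WebMethod]
    public void LogError(string jsonError)
    {
        // Deserialize the incident that sendError() posted from the browser...
        var serializer = new System.Web.Script.Serialization.JavaScriptSerializer();
        var incident = serializer.Deserialize<Incident>(jsonError);

        // ...and hand it to the logging pipeline (classification, correlation,
        // persistence in RavenDB). Placeholder call:
        IncidentLogger.Log(incident);
    }
}
```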
### Technology
Reducio uses RavenDB, jQuery, and KnockoutJS.
|
9d6f86aaa2bf777eb99668dc3c279d8dc8497cf2
|
[
"Markdown",
"C#"
] | 17 |
C#
|
dhrobbins/Reducio
|
35b723d6d141b22988c8258e059c2bda2f146792
|
67f0ab6beac012356e0d462dc288df65006349cc
|
refs/heads/main
|
<file_sep>
BiocManager::install("statmod")
library(TCGAbiolinks)
library(limma)
library(edgeR)
library(statmod)
#Gene expression matrix
targets <- read.csv("/Users/yiyang/Desktop/GBM/Files/Rawcounts.csv", check.names = F)[,-1]
rownames(targets) <- targets[,1]
targets <- targets[,-1]
targets <- data.matrix(targets) #coerce to a numeric matrix (as.numeric() on a data frame would drop the dimensions)
#Grouping
group <- c(rep("NT",5),rep("TP",169))
#Build the DGEList object
dgelist <- DGEList(counts = targets, group = group)
#Filter lowly expressed genes, then normalize
keep <- rowSums(cpm(dgelist) > 1 ) >= 2 #filtering
dgelist <- dgelist[keep, ,keep.lib.sizes = FALSE]
#TMM normalization
dgelist_norm <- calcNormFactors(dgelist, method = 'TMM') #TMM normalization
#MDS plot
plotMDS(dgelist_norm, col = rep(c('red', 'blue'), each = 5), dim = c(1, 2))
#Estimate dispersion
design <- model.matrix(~group) #build the design matrix
dge <- estimateDisp(dgelist_norm, design, robust = TRUE) #estimate dispersion
#Plot the BCV (dispersion estimates)
plotBCV(dge) #inspect the plot
#Differential gene expression analysis
fit <- glmFit(dge, design, robust = TRUE) #fit the model
lrt <- glmLRT(fit) #likelihood ratio test
topTags(lrt)
dge_de <- decideTestsDGE(lrt, adjust.method = 'fdr', p.value = 0.05) #differentially expressed genes with the default method
summary(dge_de)
plotMD(lrt, status = dge_de, values = c(1, -1), col = c('blue', 'red')) #visualize
abline(h = c(-1, 1), col = 'gray', lty = 2)
# export the standard Limma output
v <- voom(dge, design, plot=TRUE)
fit <- lmFit(v, design)
fit <- eBayes(fit)
topTable(fit, coef=ncol(design), n=500)
LIM <- as.data.frame(topTable(fit, coef=ncol(design), n=500))
LIM <- cbind(rownames(LIM),LIM)
colnames(LIM) <- c("ENSEMBL", "logFC", "AveExpre", "T", "Pvalue", "Adj.P.Value","B")
#convert gene IDs (bitr() comes from clusterProfiler and requires org.Hs.eg.db)
library(clusterProfiler)
library(org.Hs.eg.db)
ensemblID <- LIM$ENSEMBL
gene.df <- bitr(ensemblID, fromType = "ENSEMBL",
                toType = "REFSEQ",
                OrgDb = org.Hs.eg.db)
LIM <- merge(gene.df,LIM,by="ENSEMBL")
LIM <- LIM[,-1]
write.csv(LIM,"/Users/yiyang/Desktop/GBM/Files/LIM.csv")
save.image("/Users/yiyang/Desktop/GBM/GBM/DGE Analysis & LIM Output.Rdata")
<file_sep>library(TCGAbiolinks)
library(R.utils)
#access TCGA data
query <- GDCquery(project = "TCGA-GBM",
data.category = "Transcriptome Profiling",
data.type = "Gene Expression Quantification",
workflow.type = "HTSeq - Counts")
GDCdownload(query = query)
samplesDown <- getResults(query,cols=c("cases"))
options(stringsAsFactors = F)
#Move all files under "sampleFiles_GBM"
setwd("/Users/yiyang/Desktop/ๆๅคช้พไบ/BISC490/Data")
dir.create("sampleFiles_GBM")
filepath <- dir(path = "./RAWData", full.names = T)
for(wd in filepath){
files <- dir(path = wd, pattern = "gz$")
fromfilepath <- paste(wd, "/", files, sep = "")
tofilepath <- paste("./sampleFiles_GBM/", files, sep = "")
file.copy(fromfilepath, tofilepath)
}
#unzip all files and delete the original ones
setwd("./sampleFiles_GBM")
countsFiles <- dir(path = "./", pattern = "gz$")
sapply(countsFiles, gunzip)
library(rjson)
library(dplyr)
library(limma)
library(stringr)
# json files
setwd("/Users/yiyang/Desktop/ๆๅคช้พไบ/BISC490/Data/sampleFiles_GBM")
jsonFile <- fromJSON(file = "/Users/yiyang/Desktop/ๆๅคช้พไบ/BISC490/TCGA-GBM/metadata.cart.2020-09-08.json")
filesNameToBarcode <- data.frame(filesName = c(), TCGA_Barcode = c())
for(i in 1:length(jsonFile)){
TCGA_Barcode <- jsonFile[[i]][["associated_entities"]][[1]][["entity_submitter_id"]]
file_name <- jsonFile[[i]][["file_name"]]
filesNameToBarcode <- rbind(filesNameToBarcode, data.frame(filesName = file_name, TCGA_Barcode = TCGA_Barcode))
}
rownames(filesNameToBarcode) <- filesNameToBarcode[,1]
library(maftools)
#get counts matrix
filesNameToBarcode <- filesNameToBarcode[-1]
setwd("/Users/yiyang/Desktop/ๆๅคช้พไบ/BISC490/Data/sampleFiles_GBM")
countsFileNames <- dir(pattern = "counts$")
allsampleRawCounts <- data.frame()
for(txtFile in countsFileNames){
SampleCounts <- read.table(txtFile, header = F)
rownames(SampleCounts) <- SampleCounts[,1]
SampleCounts <- SampleCounts[-1]
colnames(SampleCounts) == filesNameToBarcode$TCGA_Barcode
if (dim(allsampleRawCounts)[1]==0){
allsampleRawCounts <- SampleCounts
}
else{
allsampleRawCounts <- cbind(allsampleRawCounts, SampleCounts)
}
}
colnames(allsampleRawCounts) <- filesNameToBarcode$TCGA_Barcode
ensembl_id <- substr(row.names(allsampleRawCounts), 1, 15)
rownames(allsampleRawCounts) <- ensembl_id
write.csv(allsampleRawCounts, file="/Users/yiyang/Desktop/ๆๅคช้พไบ/BISC490/Data/Files/RawCounts.csv")
|
88fb01ada47f5077e6b8fc59aa7761ec5bae9a2d
|
[
"R"
] | 2 |
R
|
wojiusuibianquyige/BISC490Codes
|
d7599545c4e6e79265f80bdfd7efb8f337ae65ff
|
f9372ddeced62df28183ce5de45ec24c5d703da4
|
refs/heads/master
|
<file_sep>import React, { Component } from 'react';
import { saveBook, getBook } from '../data/BookApi';
class AddBook extends Component {
state = {
title: ' ',
author: ' ',
isbn: ' ',
publicationDate: ' ',
publisher: ' ',
price: ' ',
genre: ' ',
format: ' ',
};
checkExistence = () => {
if (this.props.match.params.bookId) {
console.log('FOUND BOOKID');
//get the book from server
//fill it with current details for user to update
getBook(this.props.match.params.bookId).then(book => {
this.setState({
title: book.title,
author: book.author,
isbn: book.isbn,
publicationDate: book.publicationDate,
publisher: book.publisher,
price: book.price,
genre: book.genre,
format: book.format,
id: book.id,
});
});
} else {
console.log('NO BOOKID FOUND'); // so render empty form
}
};
componentDidMount() {
this.checkExistence();
}
handleChange = e => {
this.setState({
[e.target.id]: e.target.value,
});
};
handleSubmit = e => {
e.preventDefault();
    saveBook(this.state); // save the book (create or update) via the BookApi
this.props.history.push('/');
window.location.reload(true);
};
render() {
return (
<div className="container">
<div className="form-group">
<form onSubmit={this.handleSubmit}>
<label htmlFor="title">
<h4>Title</h4>
</label>
<input
type="text"
value={this.state.title}
className="form-control"
id="title"
onChange={this.handleChange}
/>
<label htmlFor="author">
<h4>Author</h4>
</label>
<input
type="text"
value={this.state.author}
id="author"
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="isbn">
<h4>ISBN</h4>
</label>
<input
type="text"
value={this.state.isbn}
id="isbn"
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="publicationDate" className="">
<h4>PublicationDate</h4>
</label>
<input
type="text"
value={this.state.publicationDate}
id="publicationDate"
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="publisher" className="">
<h4>Publisher</h4>
</label>
<input
type="text"
value={this.state.publisher}
id="publisher"
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="price" className="">
<h4>Price</h4>
</label>
<input
type="text"
id="price"
value={this.state.price}
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="genre" className="">
<h4>Genre</h4>
</label>
<input
type="text"
id="genre"
value={this.state.genre}
className="form-control"
onChange={this.handleChange}
/>
<label htmlFor="format" className="">
<h4>Format</h4>
</label>
<input
type="text"
id="format"
value={this.state.format}
className="form-control"
onChange={this.handleChange}
/>
<button className="btn btn-default m-3">Submit</button>
</form>
</div>
</div>
);
}
}
export default AddBook;
|
1b1a03712a009979fab1a2c6ef537c7378dcb091
|
[
"JavaScript"
] | 1 |
JavaScript
|
priyanka19697/Bookstore
|
3303214e4755fdadc6345e1b4b838464d3aad0a2
|
1e11a6d188ff125e6367cf065149874134c9f9e9
|
refs/heads/master
|
<file_sep>var express = require('express');
var router = express.Router();
var app = express()
var multer = require('multer')
var upload = multer({dest: 'test/'})
/* POST a profile avatar upload. */
router.post('/profile', upload.single('avatar'), function(req, res, next) {
  console.log(req.file)
  res.json(req.file)
});
module.exports = router;
|
e640c5141921c10cc09e2be115fafee74fa1ae0e
|
[
"JavaScript"
] | 1 |
JavaScript
|
wendy-92/express-demo
|
5556347fa506c431b2e5dc795f851fcc3ee982e1
|
03ead2b55685185891df24004cdf16130dc4463c
|
refs/heads/master
|
<repo_name>cqol/jsonuri<file_sep>/spec/test_spec.js
'use strict'
const safeTrim = require('../dist/index')
describe('safeTrim', () => {
})
describe('bad args', () => {
//it('{}', () => {
// expect(safeTrim({})).toEqual('[object Object]')
//})
//
//it('[]', () => {
// expect(safeTrim([])).toEqual('')
//})
//
//it('NaN', () => {
// expect(safeTrim(NaN)).toEqual('NaN')
//})
//
//it('undefined', () => {
// expect(safeTrim(undefined)).toEqual('undefined')
//})
//
//it('null', () => {
// expect(safeTrim(null)).toEqual('null')
//})
//
//it('0', () => {
// expect(safeTrim(0)).toEqual('0')
//})
//
//it('function', () => {
// let fun = () => {}
// let ret = safeTrim(fun)
// expect(ret).toEqual(String(fun))
//})
})
|
b028d92eed4897a433b7a04e0c60fb76d46bb0f2
|
[
"JavaScript"
] | 1 |
JavaScript
|
cqol/jsonuri
|
2277ad861e5d95f33b1221a9020ca701bed76a87
|
f8b1e7eb151e10f7b96c1ac8d69f76c92a8d889b
|
refs/heads/master
|
<repo_name>ADGSankar/hangman<file_sep>/scripts/app.js
const puzzleEl = document.querySelector('#puzzle')
const gussesEl = document.querySelector('#guesses')
// document.querySelector('#puzzle').textContent = player1.puzzle;
// document.querySelector('#guesses').textContent = player1.statusMessage;
let player1
const render = () => {
puzzleEl.innerHTML = ''
gussesEl.textContent = player1.statusMessage
player1.puzzle.split('').forEach(charactor => {
const span = document.createElement('span')
span.textContent = charactor
puzzleEl.appendChild(span)
})
}
window.addEventListener('keypress', (e) => {
player1.makeGuess(e.key)
render()
})
const startGame = async () => {
const puzzle = await getPuzzle(1)
player1 = new Hungman(puzzle, 5)
render()
}
document.querySelector('#reset').addEventListener('click', startGame)
startGame()
// getPuzzle(1).then((data) => {
// console.log(data)
// }).catch((err) => {
// console.log(err)
// })
// getPuzzle('1').then((data) => {
// console.log(data)
// }, (err) => {
// console.log("Error occured")
// })
// console.log(getPuzzleSync())<file_sep>/README.md
You can have one site per GitHub repository. Repositories other than "username.github.io" will be published at http://username.github.io/REPONAME. Those repositories will publish whatever you push to the "gh-pages" branch.
A better description is available in the GitHub Pages documentation.
(since April 2013, all username.github.com are now username.github.io)
<file_sep>/scripts/requests.js
const getPuzzle = async (wordCount) => {
let data = await fetch(`//puzzle.mead.io/puzzle?wordCount=${wordCount}`)
if (data.status === 200) {
data = await data.json()
return data.puzzle
} else {
throw new Error('Something went wrong')
}
}
const getPuzzleOld = (wordCount) => {
return fetch(`//puzzle.mead.io/puzzle?wordCount=${wordCount}`).then((data) => {
if (data.status === 200) {
return data.json()
} else {
throw new Error('Something went wrong')
}
}).then((data) => {
return data.puzzle
})
}
// const getPuzzle = (wordCount) => new Promise((resolve, reject) => {
// const request = new XMLHttpRequest()
// request.open('GET', `http://puzzle.mead.io/puzzle?wordCount=${wordCount}`)
// request.send()
// request.addEventListener('readystatechange', (e) => {
// if (e.target.readyState === 4 && e.target.status === 200) {
// let puzzleData = JSON.parse(e.target.responseText)
// resolve(puzzleData.puzzle)
// } else if (e.target.readyState === 4) {
// reject('An error occured')
// }
// })
// })
// const getPuzzleSync = () => {
// const request = new XMLHttpRequest()
// request.open('GET', 'http://puzzle.mead.io/puzzle?wordCount=2', false)
// request.send()
// if (request.readyState === 4 && request.status === 200) {
// let puzzleData = JSON.parse(request.responseText)
// return puzzleData.puzzle;
// } else if (request.readyState === 4) {
// throw Error('Error occured')
// }
// }
|
aecc261b17f23eb43c8f0a7c809d5402d42533fb
|
[
"JavaScript",
"Markdown"
] | 3 |
JavaScript
|
ADGSankar/hangman
|
b802fed192ac4d119d80ce55e61eb50128fcac59
|
9fdd8081b242ecbb0f25d2943cd339e048725285
|
refs/heads/master
|
<file_sep>package com.codecool.minipa;
import java.text.SimpleDateFormat;
import java.util.Date;
public abstract class Entry {
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd 'at' HH:mm:ss z");
private String creationDate;
private String message;
public Entry(String message) {
this.creationDate = formatter.format(new Date(System.currentTimeMillis()));
this.message = message;
}
public void setMessage(String message) {
this.message = message;
}
public String getMessage() {
return this.message;
}
public String getCreationDate() {
return this.creationDate;
}
}
<file_sep>package com.codecool.minipa;
public class Comment extends Entry {
private int id;
private static int currentCommentId = 0;
private boolean moderated = false;
public Comment(String message) {
super(message);
this.id = generateNextId();
}
public boolean isModerated() {
return moderated;
}
public int generateNextId() {
return currentCommentId++;
}
    public void toggleModerated() {
        this.moderated = !this.moderated;
    }
public int getId() {
return id;
}
@Override
public String toString() {
return ("The comment with id " + this.getId() + ": '" + this.getMessage() + "', with creation date: " + this.getCreationDate());
}
}
<file_sep>package com.codecool.minipa;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
public class Topic extends Entry {
List<Comment> unModeratedComments = new ArrayList<>();
List<Comment> moderatedComments = new ArrayList<>();
private int id;
private static int currentTopicId = 1;
private String title;
public Topic(String title, String message) {
super(message);
this.setMessage(message);
this.title = title;
this.id = generateNextId();
}
public List<Comment> getUnmoderatedComments() {
return unModeratedComments;
}
public List<Comment> getModeratedComments() {
return moderatedComments;
}
public String showMessage() {
return this.getMessage();
}
public int generateNextId() {
return currentTopicId++;
}
public void addComment(String message) {
Comment newComment = new Comment(message);
unModeratedComments.add(newComment);
}
public void populateModeratedComments() {
moderatedComments = unModeratedComments.stream()
.filter(comment -> comment.isModerated() == true)
.collect(Collectors.toList());
}
    public void moderateComment() {
        if (this.getUnmoderatedComments().isEmpty()) {
            return; // nothing to moderate, avoid nextInt(0) throwing
        }
        Random rand = new Random();
        int randomModeratedComment = rand.nextInt(this.getUnmoderatedComments().size());
        this.getUnmoderatedComments().get(randomModeratedComment).toggleModerated();
        populateModeratedComments();
    }
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
@Override
public String toString() {
return ("TOPIC: " + this.getTitle() + "\r\n" + "TOPIC MESSAGE: " + this.showMessage() + "\r\n" + "THE COMMENT(S): " + this.getModeratedComments());
}
}
|
9836f5cccd50cd7e2c4856c21efc3915190cc5d0
|
[
"Java"
] | 3 |
Java
|
silviurdr/java-mini-pa
|
0684efd2cf704db4d9c48e467c379cf560a13fec
|
ca69f16aa6ba9989c00e68edcf091d982481bdc8
|
refs/heads/master
|
<file_sep>#! /usr/bin/env python
# encoding: utf-8
VERSION = '1.0.0'
APPNAME = 'ccc'
top = '.'
def options(opt):
opt.load('compiler_c')
opt.add_option(
'--tests',
action='store_true',
help='Compile unit tests'
)
opt.add_option(
'--dev',
action='store_true',
help='Add debuging information'
)
def configure(conf):
conf.load('compiler_c')
conf.check_cc(lib='m', store='M')
conf.env.CFLAGS = ['-Wall', '-Wshadow', '-pedantic', '-march=native']
conf.env.TESTS = conf.options.tests
conf.env.DEV = conf.options.dev
if conf.env.COMPILER_CC == 'gcc':
conf.env.append_value('CFLAGS', '-std=c99')
else:
conf.env.append_value('CFLAGS', '-std=c11')
if conf.options.dev:
conf.env.append_value('CFLAGS', '-g')
conf.env.append_value('CFLAGS', '-Werror')
else:
conf.define('NDEBUG', '')
def build(bld):
subdirs = ['src']
if bld.env.TESTS:
subdirs.append('tests')
bld.recurse(subdirs)
<file_sep>#pragma once
#include <stdio.h>
struct UnitContext {
char message[BUFSIZ];
int line;
const char* file;
const char* func;
};
<file_sep>#! /usr/bin/env python
# encoding: utf-8
bld(
features='c cprogram',
source = bld.path.ant_glob('*.c'),
use = ['ccc', 'M'],
includes = '../src',
target = 'runtest'
)
<file_sep>#include "suite.h"
#include "test.h"
int run_suite(struct UnitSuite* suite)
{
int i = 0;
int num_errored_test = 0;
while(suite[i].tests != NULL) {
num_errored_test += run_tests(suite[i].name, suite[i].tests);
i++;
}
return num_errored_test;
}
<file_sep>#include "test.h"
#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <setjmp.h>
#include <stdarg.h>
#include <stdlib.h>
#include <time.h>
enum State
{
STATE_UNKNOW,
STATE_SUCCESS,
STATE_FAIL,
STATE_SKIP,
};
enum Color
{
COLOR_RESET,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
};
const char* colorCodes[] = {
"\x1b[0m",
"\x1b[31m",
"\x1b[32m",
"\x1b[33m",
"\x1b[34m",
"\x1b[35m",
"\x1b[36m",
};
struct Result
{
unsigned int num_tests;
unsigned int num_successed_tests;
unsigned int num_errored_tests;
struct UnitTest* errored_test;
unsigned int duration;
};
static jmp_buf jumper;
static int nb_assert = 0;
static struct UnitTest* current_test = NULL;
#define print(color, ...) fprint(stdout, (color), __VA_ARGS__)
#define print_error(...) fprint(stderr, COLOR_RED, __VA_ARGS__)
static void fprint(FILE* stream, enum Color color, const char* format, ...)
{
va_list ap;
va_start(ap, format);
fprintf(stream, "%s", colorCodes[color]);
vfprintf(stream, format, ap);
fprintf(stream, "%s", colorCodes[COLOR_RESET]);
va_end(ap);
}
static int run_test(const struct UnitTest test)
{
int status;
errno = 0;
status = setjmp(jumper);
switch (status) {
case STATE_UNKNOW:
if (test.setup_func != NULL) {
test.setup_func();
}
test.test_func();
status = STATE_SUCCESS;
if (nb_assert == 0) {
print(COLOR_CYAN, "0");
}
else {
            print(COLOR_GREEN, "✓");
}
break;
case STATE_FAIL:
            print(COLOR_RED, "✗");
break;
case STATE_SKIP:
print(COLOR_YELLOW, "S");
break;
}
if (setjmp(jumper) != STATE_UNKNOW) {
fflush(stdout);
print_error("\nDon't use assertion in teardown function.\n");
abort();
}
if (test.teardown_func != NULL) {
test.teardown_func();
}
return status;
}
static void print_summary(const struct Result result)
{
for (int i = 0; i < result.num_errored_tests; i++) {
printf(
"\n%d) %s\n%s\n\n%s:%d\n",
i + 1,
result.errored_test[i].context.func,
result.errored_test[i].context.message,
result.errored_test[i].context.file,
result.errored_test[i].context.line
);
}
printf("\n");
print(COLOR_YELLOW, "Total: %u", result.num_tests);
print(COLOR_GREEN, " Passed: %u", result.num_successed_tests);
print(COLOR_RED, " Failed: %u", result.num_errored_tests);
print(COLOR_BLUE, " Duration: %u seconds\n\n", result.duration);
}
int run_tests(const char* name, struct UnitTest* tests)
{
time_t start;
struct Result result = { 0 };
printf("%s\n\n", name);
start = time(NULL);
while (tests[result.num_tests].test_func != NULL) {
nb_assert = 0;
current_test = &tests[result.num_tests];
switch (run_test(*current_test)) {
case STATE_FAIL:
result.num_errored_tests++;
result.errored_test = realloc(
result.errored_test,
result.num_errored_tests * sizeof(*result.errored_test)
);
result.errored_test[result.num_errored_tests - 1] = *current_test;
break;
case STATE_SUCCESS:
result.num_successed_tests++;
break;
}
current_test = NULL;
result.num_tests++;
}
result.duration = time(NULL) - start;
printf("\n");
print_summary(result);
free(result.errored_test), result.errored_test = NULL;
return result.num_errored_tests;
}
void test_skip(void)
{
longjmp(jumper, STATE_SKIP);
}
void test_fail(struct UnitContext context)
{
current_test->context = context;
longjmp(jumper, STATE_FAIL);
}
void test_success(void)
{
nb_assert++;
}
<file_sep># Contemporain C Checker
[](https://travis-ci.org/sanpii/ccc)
Contemporain means:
* Easy to use;
* Nice output;
* Support for C99 types (`bool` and `complex`);
* Use of new C11 features (the `_Generic` selection macro).

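A minimal usage sketch, based on the test runner under `tests/` in this repository — the macros and functions used (`ADD_TEST`, `END_OF_TEST`, `ADD_SUITE`, `END_OF_SUITE`, `run_suite`, `assert_int_equal`) are the ones declared in `src/`:
```c
#include "asserts.h"
#include "suite.h"
#include "test.h"
#include <stdlib.h>

static void test_addition(void)
{
    assert_int_equal(1 + 1, 2);
    /* with a C11 compiler, the type-generic assert_equal(1 + 1, 2) also works */
}

int main(void)
{
    struct UnitTest my_tests[] = {
        ADD_TEST(test_addition),
        END_OF_TEST,
    };
    struct UnitSuite suite[] = {
        ADD_SUITE(my_tests),
        END_OF_SUITE,
    };
    return run_suite(suite) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
```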
<file_sep>#! /usr/bin/env python
# encoding: utf-8
bld(
features='c cshlib',
source = bld.path.ant_glob('*.c'),
target = 'ccc',
version = '1.0.0'
)
<file_sep>#pragma once
struct UnitSuite {
const char* name;
struct UnitTest* tests;
};
#define ADD_SUITE(unit_tests) { #unit_tests, (unit_tests) }
#define END_OF_SUITE { NULL, NULL }
int run_suite(struct UnitSuite* suite);
<file_sep>#include "asserts.h"
#include "test.h"
#include <errno.h>
#include <float.h>
#include <math.h>
#include <stdarg.h>
#include <string.h>
void assert_true_(ACTUAL_PARAM(bool), EXTRA_PARAM, ...)
{
if (actual) {
test_success();
}
else {
struct UnitContext context;
    if (format == NULL) {
        /* no explicit format was given: build a default message */
        snprintf(context.message, BUFSIZ, "%s = %d", actual_str, actual);
    }
else {
va_list ap;
va_start(ap, format);
vsnprintf(context.message, BUFSIZ, format, ap);
va_end(ap);
}
context.file = file;
context.line = line;
context.func = func;
test_fail(context);
}
}
void assert_int_equal_(ACTUAL_PARAM(int), EXPECTED_PARAM(int), EXTRA_PARAM)
{
if (format == NULL) {
format = "%s = %d != %d = %s";
}
assert_true_(
actual == expected, NULL,
file, line, func,
format, actual_str, actual, expected, expected_str
);
}
void assert_unsigned_int_equal_(
ACTUAL_PARAM(unsigned int),
EXPECTED_PARAM(unsigned int),
EXTRA_PARAM
) {
if (format == NULL) {
format = "%s = %llu != %llu = %s";
}
assert_true_(
actual == expected, NULL,
file, line, func,
format, actual_str, actual, expected, expected_str
);
}
void assert_double_equal_(
ACTUAL_PARAM(double),
EXPECTED_PARAM(double),
EXTRA_PARAM
) {
if (format == NULL) {
format = "%s = %g != %g = %s";
}
assert_true_(
        fabs(actual - expected) < DBL_EPSILON, NULL,
file, line, func,
format, actual_str, actual, expected, expected_str
);
}
void assert_unsigned_long_long_equal_(
ACTUAL_PARAM(unsigned long long),
EXPECTED_PARAM(unsigned long long),
EXTRA_PARAM
) {
if (format == NULL) {
format = "%s = %llu != %llu = %s";
}
assert_true_(
actual == expected, NULL,
file, line, func,
format, actual_str, actual, expected, expected_str
);
}
void assert_complex_equal_(
ACTUAL_PARAM(double complex),
EXPECTED_PARAM(double complex),
EXTRA_PARAM
) {
if (format == NULL) {
format = "%s = %g+i%g != %g+i%g = %s";
}
assert_true_(
cabs(actual - expected) < DBL_EPSILON, NULL,
file, line, func,
format, actual_str, creal(actual), cimag(actual),
creal(expected), cimag(expected), expected_str
);
}
void assert_errno_equal_(EXPECTED_PARAM(int), EXTRA_PARAM)
{
if (format == NULL) {
format = "errno = %d != %d = %s";
}
assert_true_(
errno == expected, NULL,
file, line, func,
format, errno, expected, expected_str
);
}
void assert_string_equal_(
ACTUAL_PARAM(const char*),
EXPECTED_PARAM(const char*),
EXTRA_PARAM
) {
if (format == NULL) {
format = "%s = %s != %s = %s";
}
assert_true_(
strcmp(actual, expected) == 0, NULL,
file, line, func,
format, actual_str, actual, expected, expected_str
);
}
void assert_file_exist_(const char* filename, EXTRA_PARAM)
{
FILE* fd = NULL;
if (format == NULL) {
format = "the file '%s' doesn't exists";
}
fd = fopen(filename, "r");
if (fd != NULL) {
fclose(fd);
}
assert_true_(fd != NULL, NULL,
file, line, func,
format, filename
);
}
<file_sep>#pragma once
#include <stdbool.h>
#include <complex.h>
#include "context.h"
#define ACTUAL_PARAM(type) type actual, const char* actual_str
#define EXPECTED_PARAM(type) type expected, const char* expected_str
#define EXTRA_PARAM const char* file, int line, const char* func, const char* format
#define EXTRA_ARGS __FILE__, __LINE__, __func__
void assert_true_(ACTUAL_PARAM(bool), EXTRA_PARAM, ...);
#define assert_true(value) assert_true_(value, #value, EXTRA_ARGS, NULL)
void assert_string_equal_(ACTUAL_PARAM(const char*), EXPECTED_PARAM(const char*), EXTRA_PARAM);
#define assert_string_equal(actual, expected) \
assert_string_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
void assert_int_equal_(ACTUAL_PARAM(int), EXPECTED_PARAM(int), EXTRA_PARAM);
#define assert_int_equal(actual, expected) \
assert_int_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
void assert_unsigned_int_equal_(ACTUAL_PARAM(unsigned int), EXPECTED_PARAM(unsigned int), EXTRA_PARAM);
#define assert_unsigned_int_equal(actual, expected) \
assert_unsigned_int_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
void assert_unsigned_long_long_equal_(ACTUAL_PARAM(unsigned long long), EXPECTED_PARAM(unsigned long long), EXTRA_PARAM);
#define assert_unsigned_long_long_equal(actual, expected) \
assert_unsigned_long_long_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
void assert_double_equal_(ACTUAL_PARAM(double), EXPECTED_PARAM(double), EXTRA_PARAM);
#define assert_double_equal(actual, expected) \
assert_double_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
void assert_complex_equal_(ACTUAL_PARAM(complex double), EXPECTED_PARAM(complex double), EXTRA_PARAM);
#define assert_complex_equal(actual, expected) \
assert_complex_equal_(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
#define assert_null(pointer) assert_true_((pointer) == NULL, #pointer, EXTRA_ARGS, "%s != NULL", #pointer)
#define assert_not_null(pointer) assert_true_((pointer) != NULL, #pointer, EXTRA_ARGS, "%s == NULL", #pointer)
#define assert_not_reached() assert_true_(false, "", EXTRA_ARGS, "%s:%d reached", __FILE__, __LINE__)
void assert_errno_equal_(EXPECTED_PARAM(int), EXTRA_PARAM);
#define assert_errno_equal(expected) assert_errno_equal_(expected, #expected, EXTRA_ARGS, NULL)
void assert_file_exist_(const char* filename, EXTRA_PARAM);
#define assert_file_exist(filename) assert_file_exist_(filename, EXTRA_ARGS, NULL)
#if __STDC_VERSION__ >= 201112L
# define assert_equal(actual, expected) _Generic((actual, expected), \
int: assert_int_equal_, \
double: assert_double_equal_, \
unsigned int: assert_unsigned_int_equal_, \
unsigned long long: assert_unsigned_long_long_equal_, \
double complex: assert_complex_equal_, \
float complex: assert_complex_equal_, \
char*: assert_string_equal_ \
)(actual, #actual, expected, #expected, EXTRA_ARGS, NULL)
#endif
<file_sep>#pragma once
#include "context.h"
struct UnitTest {
const char* name;
void (*test_func)(void);
void (*setup_func)(void);
void (*teardown_func)(void);
struct UnitContext context;
};
#define ADD_TEST(unit_test) { #unit_test, (unit_test), NULL, NULL }
#define ADD_TEST_WITH_SETUP(unit_test) { #unit_test, (unit_test), unit_test ## _setup, NULL }
#define ADD_TEST_WITH_TEARDOWN(unit_test) { #unit_test, (unit_test), NULL, unit_test ## _teardown }
#define ADD_TEST_FULL(unit_test) { #unit_test, (unit_test), unit_test ## _setup, unit_test ## _teardown }
#define END_OF_TEST { NULL, NULL, NULL, NULL }
int run_tests(const char* name, struct UnitTest* tests);
void test_skip(void);
void test_fail(struct UnitContext context);
void test_success(void);
<file_sep>#include "asserts.h"
#include "suite.h"
#include "test.h"
#include <stdbool.h>
#include <complex.h>
#include <stdlib.h>
static void test_assert_true(void)
{
bool actual = true;
assert_true(actual);
}
static void test_assert_string_equal(void)
{
const char* actual = "test";
assert_string_equal(actual, "test");
}
static void test_assert_double_equal(void)
{
double actual = 1.;
assert_double_equal(actual, 1.);
}
static void test_assert_unsigend_int_equal(void)
{
unsigned int actual = 1;
assert_unsigned_int_equal(actual, 1);
}
static void test_assert_unsigend_long_long_equal(void)
{
unsigned long long actual = 1;
assert_unsigned_long_long_equal(actual, 1);
}
static void test_assert_complex_equal(void)
{
double complex actual = 1. + I * 2.;
assert_complex_equal(actual, 1. + I * 2.);
}
static void test_assert_null(void)
{
void* actual = NULL;
assert_null(actual);
}
static void test_assert_not_null(void)
{
void* actual = &actual;
assert_not_null(actual);
}
static void test_assert_errno_equal(void)
{
assert_errno_equal(0);
}
static void test_assert_equal(void)
{
#if __STDC_VERSION__ >= 201112L
assert_equal(1, 1);
assert_equal(1., 1.);
assert_equal(1u, 1u);
assert_equal(1ull, 1ull);
assert_equal(I, I);
assert_equal(I * 1.f, I * 1.f);
assert_equal("test", "test");
#else
test_skip();
#endif
}
static void test_skip_test(void)
{
test_skip();
}
static void test_no_test(void)
{
}
static void test_file_exist(void)
{
assert_file_exist("tests/" __FILE__);
}
static void test_fixture_setup(void)
{
}
static void test_fixture(void)
{
assert_true(true);
}
static void test_fixture_teardown(void)
{
}
int main(void)
{
struct UnitTest asserts_tests[] = {
ADD_TEST(test_assert_true),
ADD_TEST(test_assert_string_equal),
ADD_TEST(test_assert_unsigend_int_equal),
ADD_TEST(test_assert_double_equal),
ADD_TEST(test_assert_unsigend_long_long_equal),
ADD_TEST(test_assert_complex_equal),
ADD_TEST(test_assert_null),
ADD_TEST(test_assert_not_null),
ADD_TEST(test_assert_errno_equal),
ADD_TEST(test_assert_equal),
ADD_TEST(test_skip_test),
ADD_TEST(test_no_test),
ADD_TEST(test_file_exist),
ADD_TEST_WITH_SETUP(test_fixture),
ADD_TEST_WITH_TEARDOWN(test_fixture),
ADD_TEST_FULL(test_fixture),
END_OF_TEST,
};
struct UnitSuite suite[] = {
ADD_SUITE(asserts_tests),
END_OF_SUITE,
};
return run_suite(suite) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
17d8eda1f9684d44f1ddcd2fb64b35974e43d80b
|
[
"Markdown",
"C",
"Python"
] | 12 |
Python
|
sanpii/ccc
|
36c10a6154fa058d3a657c552f7e2ebe8c00627e
|
f3e00ff67d1cd683af1a4ee1eb74e155529d73a3
|
refs/heads/master
|
<repo_name>cytechmobile/reminderbot<file_sep>/README.md
## What it is.
As indicates its name this project is a reminder bot. Its created to use in Google Chat using REST API.
## For who.
Anyone that uses Google Chat.
## What is needed to build and use it.
1) A **google service account** and a **private key**. More info [here](https://developers.google.com/hangouts/chat/how-tos/service-accounts) and [here](https://developers.google.com/hangouts/chat/how-tos/bots-publish).
2) A **server** to host it and a **database**.
As environment variables it needs (see the sketch below):
a) Bot name
b) Path of the private key
c) Credentials for the connection with the database
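For example, when running the packaged jar directly, these could be passed as environment variables — note that the variable names below are placeholders (only `DB_HOST` actually appears in this repository's `pom.xml`); check the application configuration for the real names:
```bash
# Placeholder variable names - check the application configuration for the actual ones.
export BOT_NAME="reminderbot"
export BOT_PRIVATE_KEY_PATH="/path/to/service-account-key.json"
export DB_HOST="localhost"
java -jar target/chatreminderbot-runner.jar
```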
## How to use it.
The current functionality that this bot supports after you invite it is the following:
1) Set a reminder
a) For you
`@bot remind me what at 16/3/2019 16:33`
`@bot remind me what in 1 minute`
b) For anyone in the current room
`@bot remind @George Papakis what at 16/3/2019 16:33`
c) All in the current room
`@bot remind @all what at 16/3/2019 16:33`
d) All in any other room that bot is invited
`@bot remind #roomName what at 16/3/2019 16:33`
2) Set timezone
a) For each reminder
`@bot remind me 'what' at 16/03/2019 16:33 Athens `
b) If the previous is omitted, set the timezone used for every reminder this user sets
`@bot set my timezone to athens`
c) If the previous is omitted, set the timezone for every user in the current domain
`@bot set global timezone to Paris`
d) By default it uses GMT
3) Show my reminders and timezone
a) For each user shows reminders that will notify him.
`@bot list`
Example:
`1) ID:23 what:' Something to do ' When: 23/01/2019 18:20 Europe/Athens`
b) To show your timezone and global timezone simply do
`@bot timezones`
4) Delete a reminder
a) For each user, using a reminder's id.
`@bot delete 323 `
5) Show current version of the bot
a) For each user, using the version command.
`@bot version`
6) Change bot configurations like this
a) For bot configurations.
`@bot config set key value`
b) For listing all configurations
`@bot config`
<file_sep>/src/test/java/gr/cytech/chatreminderbot/rest/TimeZoneTest.java
package gr.cytech.chatreminderbot.rest;
import gr.cytech.chatreminderbot.rest.controlCases.TimeZone;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class TimeZoneTest {
private TimeZone timeZone;
@BeforeEach
final void beforeEach() throws Exception {
timeZone = new TimeZone();
}
@Test
void findTimeZonesTest() {
String timezone1 = "athens";
String timezone2 = "thens";
String timezone3 = "PARIS";
String timezone4 = "RIS";
assertThat(timeZone.findTimeZones(timezone1)).isEqualTo("Europe/Athens");
assertThat(timeZone.findTimeZones(timezone2)).isEqualTo(null);
assertThat(timeZone.findTimeZones(timezone3)).isEqualTo("Europe/Paris");
assertThat(timeZone.findTimeZones(timezone4)).isEqualTo(null);
}
}
<file_sep>/pom.xml
<?xml version="1.0"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<modelVersion>4.0.0</modelVersion>
<groupId>gr.cytech</groupId>
<artifactId>chatreminderbot</artifactId>
<name>Chat Reminder Bot</name>
<version>1.15.0</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.target>11</maven.compiler.target>
<maven.compiler.source>11</maven.compiler.source>
<surefire-plugin.version>3.0.0-M3</surefire-plugin.version>
<quarkus.version>0.15.0</quarkus.version>
<prettytime.version>4.0.2.Final</prettytime.version>
<google.api.client.version>1.28.0</google.api.client.version>
<guava.version>30.1.1-jre</guava.version>
<junit.version>5.4.2</junit.version>
<assertj.version>3.12.0</assertj.version>
<mockito.version>2.27.0</mockito.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-bom</artifactId>
<version>${quarkus.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-resteasy-jsonb</artifactId>
</dependency>
<!-- Hibernate ORM specific dependencies -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-hibernate-orm</artifactId>
</dependency>
<!-- JDBC driver dependencies -->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-jdbc-postgresql</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-flyway</artifactId>
</dependency>
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-smallrye-rest-client</artifactId>
</dependency>
<dependency>
<groupId>org.ocpsoft.prettytime</groupId>
<artifactId>prettytime-nlp</artifactId>
<version>${prettytime.version}</version>
</dependency>
<dependency>
<groupId>com.google.api-client</groupId>
<artifactId>google-api-client</artifactId>
<version>${google.api.client.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<!--testing libraries-->
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.rest-assured</groupId>
<artifactId>rest-assured</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>${assertj.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<finalName>chatreminderbot</finalName>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.0.0</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.29</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>validate</id>
<phase>validate</phase>
<configuration>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<configLocation>checkstyle.xml</configLocation>
<encoding>UTF-8</encoding>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
</configuration>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-maven-plugin</artifactId>
<version>${quarkus.version}</version>
<executions>
<execution>
<goals>
<goal>build</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<version>${surefire-plugin.version}</version>
<configuration>
<systemProperties>
<java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>
</systemProperties>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
<configuration>
<environmentVariables>
<!--suppress UnresolvedMavenProperty -->
<DB_HOST>${docker.container.wa.ip}</DB_HOST>
<!--suppress UnresolvedMavenProperty -->
<APP_HOST>${docker.container.reminderbot.ip}</APP_HOST>
</environmentVariables>
</configuration>
</plugin>
<plugin>
<groupId>io.fabric8</groupId>
<artifactId>docker-maven-plugin</artifactId>
<version>0.30.0</version>
<configuration>
<showLogs>true</showLogs>
<images>
<image>
<name>postgres:alpine</name>
<alias>wa</alias>
<run>
<env>
<POSTGRES_USER>wa</POSTGRES_USER>
<POSTGRES_PASSWORD>wa</POSTGRES_PASSWORD>
<POSTGRES_DB>wa</POSTGRES_DB>
</env>
<wait>
<log>(?s)database system is ready to accept connections.*database system is ready to accept connections</log>
<time>20000</time>
</wait>
</run>
</image>
<image>
<name>cytechmobile/reminderbot</name>
<alias>reminderbot</alias>
<build>
<cleanup>try</cleanup>
<assembly>
<targetDir>/app</targetDir>
<inline>
<id>${project.artifactId}</id>
<files>
<file>
<source>
${project.build.directory}/${project.build.finalName}-runner.jar
</source>
<outputDirectory>./</outputDirectory>
</file>
</files>
<fileSets>
<fileSet>
<directory>${project.build.directory}/lib</directory>
<outputDirectory>./lib/</outputDirectory>
</fileSet>
</fileSets>
</inline>
</assembly>
<cmd>
<exec>
<arg>java</arg>
<arg>-jar</arg>
<arg>/app/${project.build.finalName}-runner.jar</arg>
</exec>
</cmd>
<from>openjdk:11-jre</from>
<tags>
<tag>latest</tag>
<tag>${project.version}</tag>
</tags>
<ports>
<port>8080</port>
</ports>
</build>
<run>
<dependsOn>
<container>wa</container>
</dependsOn>
<env>
<!--suppress UnresolvedMavenProperty -->
<DB_HOST>+${docker.container.wa.ip}</DB_HOST>
</env>
<wait>
<log>Reminder Timer started</log>
<time>10000</time>
<!--<shutdown>500</shutdown>-->
</wait>
</run>
</image>
</images>
</configuration>
<executions>
<execution>
<id>build</id>
<phase>package</phase>
<goals>
<goal>build</goal>
</goals>
</execution>
<execution>
<id>prepare-it-database</id>
<phase>pre-integration-test</phase>
<goals>
<goal>start</goal>
</goals>
</execution>
<execution>
<id>remove-it-database</id>
<phase>post-integration-test</phase>
<goals>
<goal>stop</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
<file_sep>/src/main/resources/db/migration/V3__updateChars_table.sql
ALTER TABLE reminder ALTER COLUMN what TYPE varchar(255);
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseSetTimezone.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.db.Dao;
import gr.cytech.chatreminderbot.rest.message.Request;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.transaction.Transactional;
import java.util.List;
@RequestScoped
public class CaseSetTimezone {
private static final Logger logger = LoggerFactory.getLogger(CaseSetTimezone.class);
@Inject
Dao dao;
private Request request;
private List<String> splitMsg;
private String keyWordMy;
private String keyWordGlobal;
private String response = "I didnt understand whose timezone to set, type help for instructions \n";
public CaseSetTimezone() {
}
public void setKeyWordMy(String keyWordMy) {
this.keyWordMy = keyWordMy;
}
public void setKeyWordGlobal(String keyWordGlobal) {
this.keyWordGlobal = keyWordGlobal;
}
public Request getRequest() {
return request;
}
public void setRequest(Request request) {
this.request = request;
}
public void setSplitMsg(List<String> splitMsg) {
this.splitMsg = splitMsg;
}
@Transactional
public String setTimezone() {
String givenTimezone = extractTimeZone();
//Checks given timezone
if (givenTimezone == null) {
return "Given timezone is wrong, try again.";
}
// setting global timezone
if (splitMsg.get(1).equals(keyWordGlobal)) {
logger.info("---Case Set global timezone---");
TimeZone defaultTimeZone = new TimeZone(givenTimezone, "default");
defaultTimeZone = dao.merge(defaultTimeZone);
response = "You successfully set the global timezone at:" + defaultTimeZone.getTimezone();
return response;
} else {
// setting user timezone
if (splitMsg.get(1).equals(keyWordMy)) {
logger.info("---Case Set user timezone---");
String who = request.getMessage().getSender().getName();
TimeZone timeZone = new TimeZone(givenTimezone, who);
timeZone = dao.merge(timeZone);
response = " <" + who + "> successfully set your timezone at:" + timeZone.getTimezone();
return response;
}
}
return response;
}
public String extractTimeZone() {
String[] message = request.getMessage().getText().split("\\s+");
String timeZone = null;
for (int i = 0; i < message.length; i++) {
if (message[i].equals("timezone") && message.length == i + 3) {
TimeZone timeZoneFinder = new TimeZone();
timeZone = timeZoneFinder.findTimeZones(message[i + 2]);
}
}
return timeZone;
}
}
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/GoogleCards/CardResponseBuilder.java
package gr.cytech.chatreminderbot.rest.GoogleCards;
import javax.json.Json;
import javax.json.JsonArrayBuilder;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import java.util.Map;
/**
* Creates a card response to a Hangouts Chat message, in JSON format.
* <p>
* See the documentation for more details:
* https://developers.google.com/hangouts/chat/reference/message-formats/cards
*/
public class CardResponseBuilder {
public static final String UPDATE_MESSAGE = "UPDATE_MESSAGE";
public static final String NEW_MESSAGE = "NEW_MESSAGE";
private JsonObject headerNode;
private JsonObjectBuilder thread;
private JsonObjectBuilder responseNode;
private JsonArrayBuilder widgetsArray;
private JsonArrayBuilder cardsArray;
/**
* Default public constructor.
*/
public CardResponseBuilder() {
this.thread = Json.createObjectBuilder();
this.responseNode = Json.createObjectBuilder();
this.cardsArray = Json.createArrayBuilder();
this.widgetsArray = Json.createArrayBuilder();
}
/**
* Creates a new CardResponseBuilder object for responding to an interactive card click.
*
* @param updateType the update type, either UPDATE_MESSAGE or NEW_MESSAGE.
*/
public CardResponseBuilder(String updateType) {
this();
responseNode.add("actionResponse", Json.createObjectBuilder()
.add("type", updateType));
}
/**
* Adds a header to the card response.
*
* @param title the header title
* @param subtitle the header subtitle
* @param imageUrl the header image
* @return this CardResponseBuilder
*/
public CardResponseBuilder header(String title, String subtitle, String imageUrl) {
this.headerNode = Json.createObjectBuilder()
.add("header", Json.createObjectBuilder()
.add("title", title)
.add("subtitle", subtitle)
.add("imageUrl", imageUrl)
.add("imageStyle", "IMAGE"))
.build();
return this;
}
/**
* Adds a TextParagraph widget to the card response.
*
* @param message the message in the text paragraph
* @return this CardResponseBuilder
*/
public CardResponseBuilder textParagraph(String message) {
this.widgetsArray.add(Json.createObjectBuilder()
.add("textParagraph", Json.createObjectBuilder()
.add("text", message)));
return this;
}
/**
* Adds a KeyValue widget to the card response.
* <p>
* For a list of icons that can be used, see:
* https://developers.google.com/hangouts/chat/reference/message-formats/cards#builtinicons
*
* @param key the key or top label
* @param value the value or content
* @param bottomLabel the content below the key/value pair
* @param iconName a specific icon
* @return this CardResponseBuilder
*/
public CardResponseBuilder keyValue(String key, String value,
String bottomLabel, String iconName) {
this.widgetsArray.add(Json.createObjectBuilder()
.add("keyValue", Json.createObjectBuilder()
.add("topLabel", key)
.add("content", value)
.add("bottomLabel", bottomLabel)
.add("icon", iconName)));
return this;
}
/**
* Adds an Image widget to the card response.
*
* @param imageUrl the URL of the image to display
* @param redirectUrl the URL to open when the image is clicked.
* @return this CardResponseBuilder
*/
public CardResponseBuilder image(String imageUrl, String redirectUrl) {
this.widgetsArray.add(Json.createObjectBuilder()
.add("image", Json.createObjectBuilder()
.add("imageUrl", imageUrl)
.add("onClick", Json.createObjectBuilder()
.add("openLink", Json.createObjectBuilder()
.add("url", redirectUrl)))));
return this;
}
/**
* Adds a Text Button widget to the card response.
* <p>
* When clicked, the button opens a link in the user's browser.
*
* @param text the text on the button
* @param redirectUrl the link to open
* @return this CardResponseBuilder
*/
public CardResponseBuilder textButton(String text, String redirectUrl) {
this.widgetsArray.add(Json.createObjectBuilder()
.add("buttons", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("textButton", Json.createObjectBuilder()
.add("text", text)
.add("onClick", Json.createObjectBuilder()
.add("openLink", Json.createObjectBuilder()
.add("url", redirectUrl)))))));
return this;
}
/**
* Adds an Image Button widget to the card response.
* <p>
* When clicked, the button opens a link in the user's browser.
*
* @param iconName the icon to display
* @param redirectUrl the link to open
* @return this CardResponseBuilder
*/
public CardResponseBuilder imageButton(String iconName, String redirectUrl) {
this.widgetsArray.add(Json.createObjectBuilder()
.add("buttons", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("imageButton", Json.createObjectBuilder()
.add("icon", iconName)
.add("onClick", Json.createObjectBuilder()
.add("openLink", Json.createObjectBuilder()
.add("url", redirectUrl)))))));
return this;
}
/**
* Adds an interactive Text Button widget to the card response.
* <p>
* When clicked, the button sends a new request to the bot, passing along the custom actionName
* and parameter values. The actionName and parameter values are defined by the developer when the
* widget is first declared (as shown below).
*
* @param text the text to display
* @param actionName the custom action name
* @param customActionParameters the custom key value pairs
* @return this CardResponseBuilder
*/
public CardResponseBuilder interactiveTextButton(String text, String actionName,
Map<String, String> customActionParameters) {
// Define the custom action name and parameters for the interactive button.
JsonObjectBuilder actionNode = Json.createObjectBuilder()
.add("actionMethodName", actionName);
if (customActionParameters != null && customActionParameters.size() > 0) {
addCustomActionParameters(actionNode, customActionParameters);
}
this.widgetsArray.add(Json.createObjectBuilder()
.add("buttons", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("textButton", Json.createObjectBuilder()
.add("text", text)
.add("onClick", Json.createObjectBuilder()
.add("action", actionNode))))));
return this;
}
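    // Hypothetical usage sketch (not part of the original source): building a card with one
    // interactive button whose click posts an action named "snooze" together with a
    // "reminderId" parameter back to the bot. The action name and parameter key are
    // illustrative placeholders; the builder methods are the ones defined in this class.
    //
    //   String json = new CardResponseBuilder()
    //           .thread("spaces/SPACE_ID")
    //           .textParagraph("Time is up!")
    //           .interactiveTextButton("Snooze", "snooze", Map.of("reminderId", "42"))
    //           .build();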
public CardResponseBuilder thread(String name) {
this.thread.add("name", name);
return this;
}
/**
* Adds an interactive Image Button widget to the card response.
* <p>
* When clicked, the button sends a new request to the bot, passing along the custom actionName
* and parameter values. The actionName and parameter values are defined by the developer when the
* widget is first declared (as shown below).
*
* @param iconName the pre-defined icon to display.
* @param actionName the custom action name
* @param customActionParameters the custom key value pairs
* @return this CardResponseBuilder
*/
public CardResponseBuilder interactiveImageButton(String iconName, String actionName,
Map<String, String> customActionParameters) {
// Define the custom action name and parameters for the interactive button.
JsonObjectBuilder actionNode = Json.createObjectBuilder()
.add("actionMethodName", actionName);
if (customActionParameters != null && customActionParameters.size() > 0) {
addCustomActionParameters(actionNode, customActionParameters);
}
this.widgetsArray.add(Json.createObjectBuilder()
.add("buttons", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("imageButton", Json.createObjectBuilder()
.add("icon", iconName)
.add("onClick", Json.createObjectBuilder()
.add("action", actionNode))))));
return this;
}
/**
* Builds the card response and returns a JSON object node.
*
* @return card response as JSON-formatted string
*/
public String build(String typeForMessage) {
// If you want your header to appear before all other cards,
// you must add it to the `cards` array as the first / 0th item.
if (this.headerNode != null) {
this.cardsArray.add(this.headerNode);
}
JsonObject cardsNode =
responseNode
.add("actionResponse", Json.createObjectBuilder().add("type", typeForMessage))
.add("cards", this.cardsArray
.add(Json.createObjectBuilder()
.add("sections", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("widgets", this.widgetsArray)))))
.add("thread", this.thread)
.build();
return cardsNode.toString();
}
public String build() {
// If you want your header to appear before all other cards,
// you must add it to the `cards` array as the first / 0th item.
if (this.headerNode != null) {
this.cardsArray.add(this.headerNode);
}
JsonObject cardsNode =
responseNode
.add("cards", this.cardsArray
.add(Json.createObjectBuilder()
.add("sections", Json.createArrayBuilder()
.add(Json.createObjectBuilder()
.add("widgets", this.widgetsArray)))))
.add("thread", this.thread)
.build();
return cardsNode.toString();
}
/**
* Applies sets of custom parameters to the parameter field of an action.
* @param actionNode the JSON action node
* @param customActionParameters the parameters to apply to the custom action
*/
private void addCustomActionParameters(JsonObjectBuilder actionNode,
Map<String, String> customActionParameters) {
JsonArrayBuilder parametersArray = Json.createArrayBuilder();
customActionParameters.forEach((k, v) -> {
parametersArray.add(Json.createObjectBuilder()
.add("key", k)
.add("value", v));
});
actionNode.add("parameters", parametersArray);
}
public String cardWithOneInteractiveButton(String thread, String textParagraph, String buttonText,
String actionName, Map<String, String> parameters,
String updateType) {
//building a card with a text paragraph and one button
return new CardResponseBuilder(updateType)
.thread(thread)
.textParagraph(textParagraph)
.interactiveTextButton(buttonText, actionName,
parameters)
.build();
}
public String cardWithOnlyText(String thread, String text, String updateType) {
//building a simple card with only a text paragraph
return new CardResponseBuilder(updateType)
.thread(thread)
.textParagraph(text)
.build();
}
public String cardWithThreeInteractiveButton(String thread, String textParagraph, String textButton,
String actionName, Map<String, String> parameters,
String secondTextButton, String secondActionName,
String thirdTextButton, String thirdActionName,
String updateMessage) {
//building a card with a text paragraph and three button
return new CardResponseBuilder(updateMessage)
.thread(thread)
.textParagraph(textParagraph)
.interactiveTextButton(textButton, actionName, parameters)
.interactiveTextButton(secondTextButton, secondActionName, parameters)
.interactiveTextButton(thirdTextButton, thirdActionName, parameters)
.build();
}
}<file_sep>/src/main/resources/db/migration/V7__dropReminderTimezoneColumn.sql
ALTER TABLE reminder DROP COLUMN reminder_timezone;
<file_sep>/src/main/resources/db/migration/V8__recurringReminders.sql
ALTER TABLE reminder ADD COLUMN is_recurring boolean;
ALTER TABLE reminder ADD COLUMN full_text VARCHAR(255);
ALTER TABLE reminder ADD COLUMN is_for_all boolean;
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseCancelReminder.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder;
import gr.cytech.chatreminderbot.rest.db.Dao;
import gr.cytech.chatreminderbot.rest.message.Request;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.transaction.Transactional;
import java.util.List;
import static gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder.NEW_MESSAGE;
import static gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder.UPDATE_MESSAGE;
import static gr.cytech.chatreminderbot.rest.message.Action.CANCEL_REMINDER;
@RequestScoped
public class CaseCancelReminder {
private static final Logger logger = LoggerFactory.getLogger(CaseCancelReminder.class);
@Inject
Dao dao;
public CaseCancelReminder() {
}
@Transactional
String cancelReminder(Request request, List<String> splitMsg) {
String reminderId;
if (splitMsg.get(1).matches("[0-9]+")) {
reminderId = splitMsg.get(1);
} else {
return "Wrong id format, must be only numbers";
}
// -- Checks reminder id AND userid
int remId = Integer.parseInt(reminderId);
List<Reminder> reminders = dao.findReminders(request.getMessage().getSender().getName(),
remId);
if (reminders.isEmpty()) {
return "Couldn't find the reminder or maybe you don't own this reminder";
}
//in order to delete must use find first.
dao.deleteReminder(remId);
logger.info("Canceled reminder with ID: {}", remId);
String spaceId = request.getMessage().getThread().getSpaceId();
if (request.getAction() != null) {
if (request.getAction().getActionMethodName().equals(CANCEL_REMINDER)) {
return createCardResponse(reminders.get(0), UPDATE_MESSAGE, spaceId);
}
}
return createCardResponse(reminders.get(0), NEW_MESSAGE, spaceId);
}
private String createCardResponse(Reminder reminder, String typeForMessage, String spaceId) {
String textParagraph = "Reminder with text:\n<b>"
+ reminder.getWhat()
+ "</b>\nsuccessfully canceled!";
return new CardResponseBuilder()
.cardWithOnlyText("spaces/" + spaceId, textParagraph, typeForMessage);
}
}
<file_sep>/VersionFromPom.sh
#!/usr/bin/env bash
version=$(grep version pom.xml | grep -v '<?xml' | grep '<version>'|head -n 1|awk '{print $1}'| cut -d'>' -f 2 | cut -d'<' -f 1)
echo "##vso[task.setvariable variable=version]$version"<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseSetReminder.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder;
import gr.cytech.chatreminderbot.rest.beans.TimerSessionBean;
import gr.cytech.chatreminderbot.rest.db.Dao;
import gr.cytech.chatreminderbot.rest.message.Action;
import gr.cytech.chatreminderbot.rest.message.Request;
import org.ocpsoft.prettytime.nlp.PrettyTimeParser;
import org.ocpsoft.prettytime.nlp.parse.DateGroup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.transaction.UserTransaction;
import java.time.*;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.TimeZone;
import java.util.*;
import static gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder.NEW_MESSAGE;
import static gr.cytech.chatreminderbot.rest.GoogleCards.CardResponseBuilder.UPDATE_MESSAGE;
import static gr.cytech.chatreminderbot.rest.message.Action.*;
@RequestScoped
public class CaseSetReminder {
private static final Logger logger = LoggerFactory.getLogger(CaseSetReminder.class);
private static final Collection<String> WORDS_TO_IGNORE = Set.of("in", "on", "at", "every");
//Needs to set timer
@Inject
TimerSessionBean timerSessionBean;
@Inject
Dao dao;
@Inject
UserTransaction transaction;
Client client;
/*
* Build a reminder and persist if valid
*/
String buildAndPersistReminder(Request request) {
Reminder reminder = new Reminder();
reminder.setFullText(request.getMessage().getText());
reminder.setSpaceId(request.getMessage().getThread().getSpaceId());
reminder.setThreadId(request.getMessage().getThread().getThreadId());
if (request.getMessage().getText().length() >= 255) {
return "Part what can not be more than 255 chars";
}
List<String> splitMsg = new ArrayList<>(List.of(request.getMessage().getText().split("\\s+")));
String botName = dao.getBotName();
String timezone = dao.getUserTimezone(request.getMessage().getSender().getName());
if (splitMsg.get(0).equals("@" + botName)) {
splitMsg.remove(0);
}
String text = String.join(" ", splitMsg);
ZoneId zoneId = ZoneId.of(timezone);
TimeZone setTimeZone = TimeZone.getTimeZone(timezone);
PrettyTimeParser prettyTimeParser = new PrettyTimeParser(setTimeZone);
List<DateGroup> parse = prettyTimeParser.parseSyntax(text);
if (parse == null || parse.isEmpty()) {
return "i couldn't extract the time.\n"
+ "Check for misspelled word or use help command";
}
String timeToNotify = parse.get(0).getText();
for (String check : WORDS_TO_IGNORE) {
if (timeToNotify.startsWith(check + " ")) {
timeToNotify = timeToNotify.substring(check.length() + 1);
}
}
setInfosForRemind(request, reminder, splitMsg, parse, text, zoneId);
//pass from string to ZoneDateTime
//Check if date has passed
if (reminder.getWhen().isBefore(ZonedDateTime.now())) {
return "This date has passed "
+ reminder.getWhen() + ". Check your timezone or insert in the current reminder";
}
if (reminder.getWhen().isBefore(ZonedDateTime.now().plusSeconds(58)) && reminder.isRecuring()) {
logger.info("cant set reminder under 1 minute due to spam messages");
return "Sorry you cant set reminder under 1 minute";
}
if (request.getAction() != null) {
if (request.getAction().getParameters().get(0).get("key").equals("name")
&& !request.getAction().getParameters().get(0).get("value")
.equals(reminder.getSenderDisplayName())) {
logger.info("Button Click by other user so cant postpone the reminder");
return "You <b>can't</b> postpone another user's reminders.";
}
}
try {
transaction.begin();
dao.persist(reminder);
transaction.commit();
} catch (Exception e) {
try {
transaction.rollback();
logger.warn("Database Error when tried to commit the transaction with Exception: ", e);
return "Database Error transaction rollback";
} catch (Exception e1) {
logger.warn("Database Error when tried to rollback the transaction with Exception: ", e);
return "Oops something went wrong when tried to save the reminder";
}
}
timerSessionBean.setTimerForReminder(reminder);
Action parameters = new Action();
parameters.setBuildParametersForButton(reminder.getSenderDisplayName(), reminder.getReminderId(), "");
if (request.getAction() != null) {
logger.info("Button Clicked Update the card message");
return buildReminderResponse(reminder, timeToNotify, parameters.getBuildParametersForButton(),
request.getAction().getActionMethodName());
}
logger.info("returned default new message for reminder");
return buildReminderResponse(reminder, timeToNotify, parameters.getBuildParametersForButton(), "");
}
private String buildReminderResponse(Reminder reminder, String timeToNotify, Map<String,
String> parameters, String actionName) {
String reminderAnswer = "Reminder with text:\n<b>" + reminder.getWhat() + "</b>.\n"
+ "Saved successfully and will notify you in: \n<b>"
+ timeToNotify + "</b>";
String threadResponse = "spaces/" + reminder.getSpaceId() + "/threads/" + reminder.getThreadId();
if (actionName.equals(REMIND_AGAIN_IN_10_MINUTES)
|| actionName.equals(REMIND_AGAIN_NEXT_WEEK)
|| actionName.equals(REMIND_AGAIN_TOMORROW)) {
return new CardResponseBuilder()
                    .cardWithOnlyText(threadResponse, reminderAnswer + "\nReminder has been postponed!",
UPDATE_MESSAGE);
}
return new CardResponseBuilder().cardWithOneInteractiveButton(threadResponse, reminderAnswer,
"Cancel Reminder", CANCEL_REMINDER, parameters, NEW_MESSAGE);
}
/*
* uses the text message of the user from Request
* message:(@reminder) remind me Something to do in 10 minutes
*
* Setting basic infos for the users reminder such us:
* who
* 1) me - returns <user/id>
* 2) #RoomName (with @all) - returns roomsName
* 3) @Firstname Lastname - returns <user/id>
* 4) @all - returns <user/all>
* what
* when
* timezone
* get it from message
* get it from users settings
* get it from global settings
* */
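    /*
     * Illustrative example (not taken from the repository; the message and values are hypothetical):
     *   incoming text: "@reminderbot remind me to review the release notes in 10 minutes"
     *   who  -> the sender's <user/id>
     *   what -> "review the release notes"
     *   when -> now + 10 minutes, in the sender's (or the default) timezone
     */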
protected String updateUpToString(String upTo, Reminder reminder, List<String> splitMsg, Request request) {
//add reminder display name and remove remind and who part of the upTo string to
//display only the given text
if (splitMsg.get(0).equals("remind")) {
// 1) me
if (splitMsg.get(1).equals("me")) {
// ---- takes the ID of the sender ---
reminder.setSenderDisplayName(request.getMessage().getSender().getName());
} else if (splitMsg.get(1).equals("@all")) {
reminder.setSenderDisplayName(request.getMessage().getSender().getName());
reminder.setForAll(true);
} else {
String displayName = "";
if (splitMsg.get(1).startsWith("#")) {
// 2)#RoomName
displayName = splitMsg.get(1);
}
reminder.setSenderDisplayName(findIdUserName(displayName,
request.getMessage().getThread().getSpaceId()));
}
}
if (upTo.startsWith("remind ")) {
upTo = upTo.substring("remind ".length());
}
if (upTo.startsWith("me ")) {
upTo = upTo.substring("me ".length());
} else if (upTo.startsWith(reminder.getSenderDisplayName())) {
upTo = upTo.substring(reminder.getSenderDisplayName().length());
} else if (upTo.startsWith("@all ")) {
upTo = upTo.substring("@all ".length());
}
if (upTo.startsWith("to ")) {
upTo = upTo.substring("to ".length());
}
return upTo;
}
protected Reminder setInfosForRemind(Request request, Reminder reminder, List<String> splitMsg,
List<DateGroup> parse, String text, ZoneId zoneId) {
DateGroup dateGroup = parse.get(0);
int pos = dateGroup.getPosition();
String upTo = text.substring(0, pos).trim();
//removing ending words that PrettyTimeParser doesn't remove
for (String check : WORDS_TO_IGNORE) {
if (upTo.endsWith(" " + check)) {
upTo = upTo.substring(0, upTo.length() - (" " + check).length());
}
}
Instant when = Instant.ofEpochMilli(dateGroup.getDates().get(0).getTime());
reminder.setWhen(when.atZone(zoneId));
logger.info("set when: {}", reminder.getWhen());
upTo = updateUpToString(upTo, reminder, splitMsg, request);
if (upTo.startsWith("@")) {
if (client == null) {
client = Client.newClient(dao);
}
Map<String, String> listOfMembersInRoom = client
.getListOfMembersInRoom(request.getMessage().getThread().getSpaceId());
List<String> memberNames = new ArrayList<>(listOfMembersInRoom.keySet());
List<String> memberID = new ArrayList<>(listOfMembersInRoom.values());
for (int i = 0; i < memberNames.size(); i++) {
if (upTo.startsWith("@" + memberNames.get(i))) {
reminder.setSenderDisplayName(memberID.get(i));
upTo = upTo.replace("@" + memberNames.get(i), "");
}
}
}
if (upTo.startsWith(" to ")) {
upTo = upTo.substring(" to ".length());
}
//what: Something to do
reminder.setWhat(upTo);
if (parse.get(0).isRecurring()/* && reminder.getSenderDisplayName() != "users/all"*/) {
reminder.setRecuring(true);
} else {
reminder.setRecuring(false);
}
logger.info("set what: {}", reminder.getWhat());
return reminder;
}
/*
* @param given displayName
* @param spaceID
* @return user/id if not found given displayName
* */
protected String findIdUserName(String displayName, String spaceId) {
if (client == null) {
client = Client.newClient(dao);
}
Map<String, String> users = client.getListOfMembersInRoom(spaceId);
//if displayName not found then just save the name as it is
return users.getOrDefault(displayName, displayName);
}
//Returns date from string, based on dd/MM/yyyy HH:mm format,
    //Is called after we ensure this is the correct format
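    // Illustrative call (hypothetical values): dateForm("25/12/2021 10:30", "Europe/Athens")
    // parses to 2021-12-25T10:30 in the Europe/Athens zone.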
public static ZonedDateTime dateForm(String when, String timezone) {
String format = "dd/MM/yyyy HH:mm";
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(format);
return ZonedDateTime.parse(when, formatter.withZone(ZoneId.of(timezone)));
}
//Check if given date in string is in valid format
public static boolean isValidFormatDate(String when) {
String format = "dd/MM/yyyy HH:mm";
LocalDateTime ldt;
DateTimeFormatter formatter = DateTimeFormatter.ofPattern(format);
try {
ldt = LocalDateTime.parse(when, formatter);
String result = ldt.format(formatter);
return result.equals(when);
} catch (DateTimeParseException e) {
try {
LocalDate ld = LocalDate.parse(when, formatter);
String result = ld.format(formatter);
return result.equals(when);
} catch (DateTimeParseException exp) {
try {
LocalTime lt = LocalTime.parse(when, formatter);
String result = lt.format(formatter);
return result.equals(when);
} catch (DateTimeParseException e2) {
// logger.error("Error Parse LocalDateTime:{}", value, e);
}
}
}
return false;
}
}
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/beans/FlywayMigration.java
package gr.cytech.chatreminderbot.rest.beans;
import io.quarkus.runtime.StartupEvent;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.flywaydb.core.Flyway;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.event.Observes;
@ApplicationScoped
public class FlywayMigration {
private static final Logger logger = LoggerFactory.getLogger(FlywayMigration.class);
private boolean migrated = false;
@ConfigProperty(name = "quarkus.datasource.url")
String dbUrl;
@ConfigProperty(name = "quarkus.datasource.username")
String dbUser;
@ConfigProperty(name = "quarkus.datasource.password")
String dbPassword;
public void migrate(@Observes StartupEvent se) {
logger.info("logger with url:{} user:{} pwd:{}", dbUrl, dbUser, dbPassword);
Flyway flyway = Flyway.configure()
.dataSource(dbUrl, dbUser, dbPassword)
.load();
flyway.migrate();
migrated = true;
}
public boolean migrationCompleted() {
return migrated;
}
}
<file_sep>/src/main/resources/db/migration/V5__updateKeyToUnique_table_.sql
delete from configurations where value = 'localhost' and key = 'buttonUrl';
alter table configurations alter column key set not null;
ALTER TABLE configurations
ADD CONSTRAINT UNIQUE_key UNIQUE(key);
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/message/Space.java
package gr.cytech.chatreminderbot.rest.message;
public class Space {
private String spaceName;
private String spaceDisplayName;
private String spaceType;
public Space(String spaceName, String spaceDisplayName, String spaceType) {
this.spaceName = spaceName;
this.spaceDisplayName = spaceDisplayName;
this.spaceType = spaceType;
}
public Space() {
}
public String getSpaceName() {
return spaceName;
}
public void setSpaceName(String spaceName) {
this.spaceName = spaceName;
}
public String getSpaceDisplayName() {
return spaceDisplayName;
}
public void setSpaceDisplayName(String spaceDisplayName) {
this.spaceDisplayName = spaceDisplayName;
}
public String getSpaceType() {
return spaceType;
}
public void setSpaceType(String spaceType) {
this.spaceType = spaceType;
}
}
<file_sep>/src/test/java/gr/cytech/chatreminderbot/rest/controlCases/CaseSetConfigurationsTest.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.db.Dao;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
public class CaseSetConfigurationsTest {
CaseSetConfigurations caseSetConfigurations;
Dao dao;
@BeforeEach
public void beforeEach() throws Exception {
dao = mock(Dao.class);
caseSetConfigurations = new CaseSetConfigurations();
caseSetConfigurations.dao = dao;
}
@Test
public void setConfigurationsTest() throws Exception {
String message = "config set buttonUrl localhost";
String expectedResponse = "Updated configuration to localhost with key buttonUrl";
List<String> splitMsg = List.of(message.split("\\s+"));
when(dao.merge(any(Configurations.class))).thenAnswer(inv -> inv.getArguments()[0]);
assertThat(caseSetConfigurations.configurationController(splitMsg)).isEqualTo(expectedResponse);
ArgumentCaptor<Configurations> argumentCaptor = ArgumentCaptor.forClass(Configurations.class);
//verify that merge has executed exactly 1 time
verify(dao, times(1)).merge(argumentCaptor.capture());
List<Configurations> captureConfigurations = argumentCaptor.getAllValues();
assertThat(captureConfigurations.get(0).getValue()).isEqualTo("localhost");
assertThat(captureConfigurations.get(0).getKey()).isEqualTo("buttonUrl");
}
@Test
public void listOfConfigurations() throws Exception {
String message = "config";
// adding multiply whitespaces instead of just pressing space in the string
String multiplyWhiteSpaces = String.format("%-24s", " ");
//mocked query to get key/value
List<Configurations> resultList = List.of((new Configurations("test", "tost")));
when(dao.getAllConfigurations()).thenReturn(resultList);
List<String> splitMsg = new ArrayList<>(Arrays.asList(message.split("\\s+")));
String expectedResponse = "the configurations right now are: \n" + " key" + multiplyWhiteSpaces
+ "value \n" + "<b>" + resultList.get(0).getKey()
+ "</b> " + " --> " + resultList.get(0).getValue() + " \n";
assertThat(caseSetConfigurations.configurationController(splitMsg))
.isEqualTo(expectedResponse);
}
}
<file_sep>/src/main/resources/db/migration/V2__added_timezone.sql
ALTER TABLE reminder
ADD COLUMN reminder_timezone VARCHAR (35);
create table time_zone(
timezone varchar (50),
userid varchar (50)
);<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/message/Request.java
package gr.cytech.chatreminderbot.rest.message;
public class Request {
private Message message;
private Action action;
private User user;
public Request() {
}
public User getUser() {
return user;
}
public void setUser(User user) {
this.user = user;
}
public Action getAction() {
return action;
}
public void setAction(Action action) {
this.action = action;
}
public Message getMessage() {
return message;
}
public void setMessage(Message message) {
this.message = message;
}
}
<file_sep>/src/main/resources/db/migration/V6__updateKey_table.sql
ALTER TABLE configurations ALTER COLUMN value TYPE TEXT;
<file_sep>/src/main/resources/db/migration/V4__configurations_table.sql
CREATE TABLE configurations(
key VARCHAR(100),
value varchar (255)
);<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseShowTimezones.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.db.Dao;
import gr.cytech.chatreminderbot.rest.message.Request;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.persistence.NoResultException;
import javax.transaction.Transactional;
@RequestScoped
public class CaseShowTimezones {
private static final Logger logger = LoggerFactory.getLogger(CaseShowReminders.class);
@Inject
Dao dao;
private Request request;
public Request getRequest() {
return request;
}
public CaseShowTimezones() {
}
@Transactional
public String showTimezones(Request request) {
this.request = request;
String showTimezone = "---- Your timezone is ---- \n";
        String noTimezoneFound = "---- No timezone found, the default timezone is ---- \n";
String defaultTimezone = "---- Default timezone is ---- \n";
if (!dao.defaultTimezoneExists()) {
logger.info("created default timezone");
TimeZone timeZone = new TimeZone("Europe/Athens", "default");
dao.persist(timeZone);
}
String defaultTimezoneQuery = dao.getUserTimezone("default");
try {
String myTimezone = dao.getUserTimezone(request.getMessage().getSender().getName());
return showTimezone + "Timezone = " + myTimezone + "\n " + defaultTimezone
+ "Timezone = " + defaultTimezoneQuery;
} catch (NoResultException e) {
logger.info("in case no timezone found for the user");
return noTimezoneFound + "Timezone = " + defaultTimezoneQuery;
}
}
}
<file_sep>/src/main/resources/db/migration/V1__reminder_table.sql
CREATE TABLE reminder(
what VARCHAR (50),
whento TIMESTAMP,
reminder_id serial PRIMARY KEY,
space_id VARCHAR (15),
thread_id VARCHAR (15),
sender_displayname VARCHAR (50)
);<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseShowReminders.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.db.Dao;
import gr.cytech.chatreminderbot.rest.message.Request;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import java.time.format.DateTimeFormatter;
import java.util.List;
@RequestScoped
public class CaseShowReminders {
private static final Logger logger = LoggerFactory.getLogger(CaseShowReminders.class);
@Inject
Dao dao;
private Request request;
public CaseShowReminders() {
}
public Request getRequest() {
return request;
}
public void setRequest(Request request) {
this.request = request;
}
public String showReminders() {
List<Reminder> reminders = dao.showReminder(request.getMessage().getSender().getName());
String remindersShow = "---- Reminders that will notify you ---- \n";
if (reminders.isEmpty()) {
logger.debug("Reminders not found return - ");
return "---- Reminders not found ---";
} else {
return remindersShow + reminderListToString(reminders);
}
}
public String reminderListToString(List<Reminder> reminders) {
String remindersShow = "";
for (int i = 0; i < reminders.size(); i++) {
remindersShow += i + 1 + ") ID:" + reminders.get(i).getReminderId() + " what:' "
+ reminders.get(i).getWhat() + " ' When: "
+ DateTimeFormatter.ofPattern("dd/MM/yyyy HH:mm:ss")
.format(reminders.get(i).getWhen()
.withZoneSameLocal(reminders.get(i).getWhen().getZone())) + " "
+ reminders.get(i).getWhen().getZone() + "\n";
}
return remindersShow;
}
}
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/CaseSetConfigurations.java
package gr.cytech.chatreminderbot.rest.controlCases;
import gr.cytech.chatreminderbot.rest.db.Dao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.transaction.Transactional;
import java.util.List;
@RequestScoped
public class CaseSetConfigurations {
private static final Logger logger = LoggerFactory.getLogger(Control.class);
@Inject
Dao dao;
private List<String> splitMsg;
@Transactional
public String configurationController(List<String> splitMsg) {
this.splitMsg = splitMsg;
if (splitMsg.size() == 1) {
return configCommand();
}
if (splitMsg.get(1).equals("set")) {
return caseSetConfiguration();
}
return errorMessage();
}
public String caseSetConfiguration() {
StringBuilder valueForConfigurations = new StringBuilder();
for (int i = 0; i < splitMsg.size(); i++) {
if (i >= 3) {
valueForConfigurations.append(splitMsg.get(i)).append(" ");
}
}
valueForConfigurations.deleteCharAt(valueForConfigurations.length() - 1);
logger.info("the updated String is {}", valueForConfigurations);
Configurations newConfiguration = new Configurations(splitMsg.get(2), valueForConfigurations.toString());
newConfiguration = dao.merge(newConfiguration);
return "Updated configuration to " + newConfiguration.getValue() + " with key " + newConfiguration.getKey();
}
public String configCommand() {
List<Configurations> configs = dao.getAllConfigurations();
// adding multiply whitespaces instead of just pressing space in the string
String multiplyWhiteSpaces = String.format("%-24s", " ");
StringBuilder allConfigs = new StringBuilder("key" + multiplyWhiteSpaces + "value \n");
for (Configurations k : configs) {
String val = k.getValue();
if (k.getKey().toLowerCase().contains("key")) {
val = "***";
}
allConfigs.append("<b>").append(k.getKey())
.append("</b> ").append(" --> ").append(val).append(" \n");
}
return "the configurations right now are: \n " + allConfigs;
}
public String errorMessage() {
return "use `config set key value` for adding a configuration in database "
+ "or `config` to list the configurations";
}
}
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/Reminder.java
package gr.cytech.chatreminderbot.rest.controlCases;
import javax.persistence.*;
import java.time.ZonedDateTime;
import java.util.Objects;
@Entity
@Table(name = "reminder")
@NamedQueries({
@NamedQuery(name = "reminder.findNextPendingReminder",
query = "SELECT r FROM Reminder r WHERE r.when <= CURRENT_TIMESTAMP order by r.when"),
@NamedQuery(name = "reminder.findNextReminder",
query = "SELECT r FROM Reminder r order by r.when"),
@NamedQuery(name = "reminder.findAll",
query = "SELECT r from Reminder r"),
@NamedQuery(name = "reminder.showReminders",
query = "SELECT r from Reminder r where r.senderDisplayName "
+ "like :userid order by r.when"),
@NamedQuery(name = "reminder.findByUserAndReminderId",
query = "SELECT r from Reminder r where r.senderDisplayName "
+ "like :userId AND r.reminderId = :reminderId")
})
public class Reminder {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "reminder_id", nullable = false)
private int reminderId;
@Column(name = "what", nullable = false)
private String what;
@Column(name = "whenTo", nullable = false)
private ZonedDateTime when;
@Column(name = "sender_displayName", nullable = false)
private String senderDisplayName;
@Column(name = "space_id", nullable = false)
private String spaceId;
@Column(name = "thread_id", nullable = false)
private String threadId;
@Column(name = "is_recurring")
private boolean isRecuring;
@Column(name = "full_text")
private String fullText;
@Column(name = "is_for_all")
private boolean isForAll;
public Reminder() {
}
public Reminder(String what, ZonedDateTime when, String senderDisplayName, String spaceId, String threadId) {
this.what = what;
this.when = when;
this.senderDisplayName = senderDisplayName;
this.spaceId = spaceId;
this.threadId = threadId;
}
public int getReminderId() {
return reminderId;
}
public String getSpaceId() {
return spaceId;
}
public String getThreadId() {
return threadId;
}
public String getSenderDisplayName() {
return senderDisplayName;
}
public String getWhat() {
return what;
}
public ZonedDateTime getWhen() {
return when;
}
public boolean isRecuring() {
return isRecuring;
}
public String getFullText() {
return fullText;
}
public boolean isForAll() {
return isForAll;
}
public void setForAll(boolean forAll) {
isForAll = forAll;
}
public void setFullText(String fullText) {
this.fullText = fullText;
}
public void setRecuring(boolean recuring) {
isRecuring = recuring;
}
public void setReminderId(int reminderId) {
this.reminderId = reminderId;
}
public void setWhat(String what) {
this.what = what;
}
public void setWhen(ZonedDateTime when) {
this.when = when;
}
public void setSenderDisplayName(String senderDisplayName) {
this.senderDisplayName = senderDisplayName;
}
public void setSpaceId(String spaceId) {
this.spaceId = spaceId;
}
public void setThreadId(String threadId) {
this.threadId = threadId;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Reminder reminder = (Reminder) o;
return reminderId == reminder.reminderId
&& Objects.equals(what, reminder.what)
&& Objects.equals(when, reminder.when)
&& Objects.equals(senderDisplayName, reminder.senderDisplayName)
&& Objects.equals(spaceId, reminder.spaceId)
&& Objects.equals(threadId, reminder.threadId);
}
@Override
public int hashCode() {
return Objects.hash(reminderId, what, when, senderDisplayName, spaceId, threadId);
}
}
<file_sep>/src/main/java/gr/cytech/chatreminderbot/rest/controlCases/Configurations.java
package gr.cytech.chatreminderbot.rest.controlCases;
import javax.persistence.*;
@Entity
@Table(name = "configurations")
@NamedQueries({
@NamedQuery(name = "get.configurationByKey",
query = "SELECT t from Configurations t where t.key = :configKey"),
@NamedQuery(name = "get.allConfigurations",
query = "SELECT t from Configurations t")
})
public class Configurations {
@Id
@Column(name = "key", unique = true, nullable = false)
private String key;
@Column(name = "value")
private String value;
public Configurations(String key, String value) {
this.key = key;
this.value = value;
}
public Configurations() {
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
|
ec286eb60e37fe11e93237230ef4eb326be853ca
|
[
"SQL",
"Markdown",
"Maven POM",
"Java",
"Shell"
] | 25 |
Markdown
|
cytechmobile/reminderbot
|
469965f6970095feb133680ac40509d085d8ab22
|
ae3a760e37fbbb7f1cb9cdc9dae3a3f5d93f181c
|
refs/heads/main
|
<repo_name>37acoder/goqueue<file_sep>/queue.go
package goqueue
import (
"context"
"errors"
"fmt"
)
var (
ErrorQueueFull = errors.New("queue was full")
ErrorQueueEmpty = errors.New("queue was empty")
)
type Config struct {
PushBlocking bool
PopBlocking bool
MaxBuffer int64
}
type InMemoryQueue struct {
Config
receiver chan Task
}
func NewInMemoryQueue(config Config) *InMemoryQueue {
return &InMemoryQueue{
Config: config,
receiver: make(chan Task, config.MaxBuffer),
}
}
func (i *InMemoryQueue) Push(ctx context.Context, task Task) error {
if i.PushBlocking {
return i.BlockingPush(ctx, task)
} else { // non-blocking
return i.NonBlockingPush(ctx, task)
}
}
func (i *InMemoryQueue) BlockingPush(ctx context.Context, task Task) error {
select {
case <-ctx.Done():
return ctx.Err()
case i.receiver <- task:
return nil
}
}
func (i *InMemoryQueue) NonBlockingPush(ctx context.Context, task Task) error {
select {
case <-ctx.Done():
return ctx.Err()
case i.receiver <- task:
return nil
default:
return fmt.Errorf("push task failed, %w", ErrorQueueFull)
}
}
func (i *InMemoryQueue) Pop(ctx context.Context) (Task, error) {
if i.PopBlocking {
return i.BlockingPop(ctx)
} else {
return i.NonBlockingPop(ctx)
}
}
func (i *InMemoryQueue) BlockingPop(ctx context.Context) (Task, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
case task := <-i.receiver:
return task, nil
}
}
func (i *InMemoryQueue) NonBlockingPop(ctx context.Context) (Task, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
case task := <-i.receiver:
return task, nil
default:
return nil, fmt.Errorf("pop task failed, %w", ErrorQueueEmpty)
}
}
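// Illustrative usage (not part of the original package; myTask is a hypothetical Task implementation):
//
//	q := NewInMemoryQueue(Config{PushBlocking: true, PopBlocking: true, MaxBuffer: 16})
//	_ = q.Push(context.Background(), myTask) // blocks while the buffer is full
//	task, err := q.Pop(context.Background()) // blocks until a task is available
//	if err == nil {
//		_ = task.Execute(context.Background())
//	}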
<file_sep>/demo/logcollector/collector.go
package logcollector
import (
"bufio"
"context"
"fmt"
"io"
"github.com/37acoder/goqueue"
)
type logIdCtxKey struct{}
func getLogIdFromContext(ctx context.Context) (string, bool) {
logId := ctx.Value(logIdCtxKey{})
if logId == nil {
return "", false
}
if realLogId, ok := logId.(string); ok {
return realLogId, true
} else {
return "", false
}
}
type Collector struct {
q goqueue.Queue
writer io.Writer
}
func NewCollector(output io.Writer) *Collector {
q := goqueue.NewInMemoryQueue(goqueue.Config{
PushBlocking: true,
PopBlocking: true,
MaxBuffer: 1024,
})
return &Collector{
q: q,
writer: bufio.NewWriterSize(output, 10),
}
}
type LogTask struct {
LogContent string
c *Collector
}
func (l *LogTask) Execute(ctx context.Context) error {
l.c.PersistLogs(ctx, l.LogContent)
return nil
}
func (c *Collector) CtxLog(ctx context.Context, logLevel LogLevel, format string, args ...interface{}) {
logId, ok := getLogIdFromContext(ctx)
if !ok {
logId = "NoLogId"
}
format = "LogId:" + logId + " " + format
logContent := fmt.Sprintf(format, args...)
err := c.q.Push(context.Background(), &LogTask{
LogContent: logContent,
c: c,
})
if err == nil {
return
} else {
fmt.Printf("output log failed:%s, log:%s\n", err, logContent)
}
}
func (c *Collector) PersistLogs(ctx context.Context, logContent string) {
write, err := c.writer.Write([]byte(logContent))
if err != nil {
fmt.Printf("persist log error: %s, write size:%d, log content: %s\n", err, write, logContent)
return
}
}
func (c *Collector) Start() {
go func() {
for {
ctx := context.Background()
task, err := c.q.Pop(ctx)
if err != nil {
continue
}
err = task.Execute(ctx)
if err != nil {
continue
}
}
}()
}
<file_sep>/go.mod
module github.com/37acoder/goqueue
go 1.16
<file_sep>/demo/logcollector/collector_test.go
package logcollector
import (
"context"
"fmt"
"os"
"testing"
"time"
)
func TestNewCollector(t *testing.T) {
ctx := context.Background()
ctx = context.WithValue(ctx, logIdCtxKey{}, "2010")
//output := os.Stdout
output, err := os.OpenFile("run.log", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModeAppend)
if err != nil {
panic(err)
}
logCollector := NewCollector(output)
logCollector.Start()
fmt.Println(1)
go func() {
count := 0
for range time.Tick(time.Second) {
logCollector.CtxLog(ctx, LogLevelError, "count:%d\n", count)
count++
}
}()
time.Sleep(time.Second * 100)
}
<file_sep>/demo/logcollector/base.go
package logcollector
import "context"
type LogLevel int
const (
LogLevelDebug LogLevel = 0
LogLevelInfo LogLevel = 1
LogLevelError LogLevel = 2
)
type LogCollector interface {
CtxLog(ctx context.Context, logLevel LogLevel, fmt string, args ...interface{})
}<file_sep>/base.go
package goqueue
import "context"
type Queue interface {
Push(ctx context.Context, task Task) error
Pop(ctx context.Context) (Task, error)
}
type Task interface {
Execute(ctx context.Context) error
}
|
29592ff36ba06fc0c7873a1c3510867fdd9cdbeb
|
[
"Go Module",
"Go"
] | 6 |
Go
|
37acoder/goqueue
|
a95204375bdd709e913a003b964249b5026aee5e
|
9544272df75a7a09ca68fd6f7be6dd6655c7ab71
|
refs/heads/master
|
<file_sep>#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Eric
import os.path
from selenium import webdriver
from Librarys.logger import Logger
import yaml
logger = Logger(logger="BrowserDriver").getlog()
class BrowserDriver(object):
    # Get relative paths to the driver executables
base_path = os.path.abspath('.')
driver_path = os.path.join(base_path, 'Drivers')
chrome_driver = os.path.join(driver_path, 'chromedriver.exe')
ie_driver = os.path.join(driver_path, 'IEDriverServer.exe')
def __init__(self, driver):
self.driver = driver
def openbrowser(self, driver):
        # Read the config file
file_path = os.path.join(self.base_path, 'conf')
file_name = os.path.join(file_path, 'config.yaml')
with open(file_name, 'r') as f:
            config = yaml.safe_load(f.read())
        # Get the config file attributes
browser = config['browserType']['browserName']
logger.info("้ๆฉ็ๆต่งๅจไธบ: %s ๆต่งๅจ" % browser)
url = config['testUrl']['URL']
logger.info("ๆๅผ็URLไธบ: %s" % url)
if browser == "Firefox":
driver = webdriver.Firefox()
logger.info("ๅฏๅจ็ซ็ๆต่งๅจ")
elif browser == "Chrome":
driver = webdriver.Chrome(self.chrome_driver)
logger.info("ๅฏๅจ่ฐทๆญๆต่งๅจ")
elif browser == "IE":
driver = webdriver.Ie(self.ie_driver)
logger.info("ๅฏๅจIEๆต่งๅจ")
driver.get(url)
logger.info("ๆๅผURL: %s" % url)
driver.maximize_window()
logger.info("ๅ
จๅฑๅฝๅ็ชๅฃ")
driver.implicitly_wait(10)
logger.info("่ฎพ็ฝฎ10็ง้ๅผ็ญๅพ
ๆถ้ด")
return driver
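    # Illustrative conf/config.yaml layout assumed by openbrowser (values are hypothetical):
    #   browserType:
    #     browserName: Chrome
    #   testUrl:
    #     URL: https://example.com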
def quit_browser(self):
logger.info("ๅ
ณ้ญๆต่งๅจ")
self.driver.quit()
<file_sep>#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Eric
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import os.path
from .logger import Logger
import time
logger = Logger(logger='BasePage').getlog()
class BasePage(object):
def __init__(self, driver):
"""
        :param driver: the opened browser driver
"""
self.driver = driver
def get_page_title(self):
"""่ทๅ้กต้ขtitle"""
logger.info("ๅฝๅ้กต้ข็titleไธบ๏ผ %s" % self.driver.title)
return self.driver.title
def find_element(self, *loc):
try:
            # Wait until the element is visible, then return the found element.
            # The locator argument is a tuple, so it is unpacked with *.
WebDriverWait(self.driver, 30).until(lambda driver: driver.find_element(*loc).is_displayed())
return self.driver.find_element(*loc)
except NoSuchElementException:
            logger.warning('Could not locate element: %s' % loc[1])
raise
except TimeoutException:
            logger.warning('Timed out looking for element: %s' % loc[1])
def get_screent_img(self):
"""ๅฐ้กต้ขๆชๅพไธๆฅ"""
file_path = os.path.join(os.path.abspath('.'), 'Screenshots')
now = time.strftime("%Y-%m-%d_%H_%M_%S_")
screen_name = os.path.join(file_path, now + '.png')
try:
            self.driver.get_screenshot_as_file(screen_name)
            logger.info("Page screenshot saved under the project's /Screenshots/ directory")
        except NameError as ne:
            logger.error("Failed to take the screenshot: %s" % ne)
self.get_screent_img()
def send_key(self, loc, text):
        logger.info('Clearing text field contents: %s' % loc[1])
        self.find_element(*loc).clear()
        time.sleep(0.3)
        logger.info('Typing into element located by %s: %s' % (loc[0], loc[1]))
        logger.info('Input text: %s' % text)
        try:
            self.find_element(*loc).send_keys(text)
        except Exception as e:
            logger.error("Failed to input text: %s" % e)
self.get_screent_img()
def click(self, loc):
        logger.info('Clicking element located by %s: %s' % (loc[0], loc[1]))
try:
self.find_element(*loc).click()
except AttributeError as e:
logger.error("ๆ ๆณ็นๅปๅ
็ด : %s" % e)
raise
def clear(self, loc):
"""่พๅ
ฅๆๆฌๆกๆธ
็ฉบๆไฝ"""
element = self.find_element(*loc)
try:
element.clear()
logger.info("ๆธ
็ฉบๆๆฌๆกๅ
ๅฎน")
except NameError as e:
logger.error("ๆธ
็ฉบๆๆฌๆกๅ
ๅฎนๅคฑ่ดฅ: %s" % e)
self.get_screent_img()
def move_to_element(self, loc):
"""
        Mouse hover action.
Usage:
element = ('id', 'xxx')
driver.move_to_element(element)
"""
element = self.find_element(*loc)
ActionChains(self.driver).move_to_element(element).perform()
def back(self):
"""ๆต่งๅจ่ฟๅ็ชๅฃ"""
self.driver.back()
logger.info('่ฟๅไธไธไธช้กต้ข')
def forward(self):
"""ๆต่งๅจๅ่ฟไธไธไธช็ชๅฃ"""
self.driver.forward()
logger.info('ๅ่ฟๅฐไธไธไธช้กต้ข')
def wait(self, seconds):
self.driver.implicitly_wait(seconds)
logger.info('็ญๅพ
%d ็ง' % seconds)
def close(self):
"""ๅ
ณ้ญๆต่งๅจ"""
try:
self.driver.close()
logger.info('ๅ
ณ้ญๆต่งๅจ็ชๅฃ')
except NameError as ne:
logger.error('ๅ
ณ้ญๆต่งๅจ็ชๅฃๅคฑ่ดฅ %s' % ne)
def quit(self):
"""้ๅบๆต่งๅจ"""
self.driver.quit()
def get_title(self):
"""่ทๅtitle"""
return self.driver.title
def get_text(self, loc):
"""่ทๅๆๆฌ"""
element = self.find_element(*loc)
return element.text
def get_attribute(self, loc, name):
"""่ทๅๅฑๆง"""
element = self.find_element(*loc)
return element.get_attribute(name)
def js_execute(self, js):
"""ๆง่กjs"""
return self.driver.execte_script(js)
def js_focus_element(self, loc):
"""่็ฆๅ
็ด """
target = self.find_element(*loc)
self.driver.execte_script("arguments[0].scrollIntoView();", target)
def js_scroll_top(self):
"""ๆปๅจๅฐ้กถ้จ"""
js = "window.scrollTo(0, 0)"
self.driver.execte_script(js)
def js_scroll_end(self):
"""ๆปๅจๅฐๅบ้จ"""
js = "window.scrollTo(0, document.body.scrollHeight)"
self.driver.execte_script(js)
def select_by_index(self, loc, index):
"""้่ฟ็ดขๅผ๏ผ indexๆฏ็ดขๅผ็ฌฌๅ ไธช๏ผไป0ๅผๅง"""
element = self.find_element(*loc)
Select(element).select_by_index(index)
def select_by_value(self, loc, value):
"""้่ฟvalueๅฑๆง"""
element = self.find_element(*loc)
Select(element).select_by_value(value)
def select_by_text(self, loc, text):
"""้่ฟๆๆฌๅผๅฎไฝ"""
element = self.find_element(*loc)
Select(element).select_by_visible_text(text)
def is_text_in_element(self, loc, text, timeout=10):
"""ๅคๆญๆๆฌๅจๅ
็ด ้๏ผๆฒกๅฎไฝๅฐๅ
็ด ่ฟๅFalse, ๅฎไฝๅฐๅ
็ด ่ฟๅๅคๆญ็ปๆๅธๅฐๅผ"""
try:
result = WebDriverWait(self.driver, timeout, 1).until(EC.text_to_be_present_in_element(loc, text))
except TimeoutException:
logger.error("ๅ
็ด ๆฒกๆๅฎไฝๅฐ๏ผ %s" % loc)
return False
else:
return result
def is_text_in_value(self, loc, value, timeout=10):
"""
ๅคๆญๅ
็ด ็valueๅผ๏ผๆฒกๅฎไฝๅฐๅ
็ด ่ฟๅFalse๏ผๅฎไฝๅฐๅ
็ด ่ฟๅๅคๆญ็ปๆๅธๅฐๅผ
result = driver.text_in_element(element, text)
"""
try:
result = WebDriverWait(self.driver, timeout, 1).until(EC.text_to_be_present_in_element_value(loc, value))
except TimeoutException:
logger.error("ๅ
็ด ๆฒกๆๅฎไฝๅฐ๏ผ %s" % loc)
return False
else:
return result
def is_title(self, title, timeout=10):
"""ๅคๆญtitleๅฎๅ
จ็ญไบ"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.title_is(title))
return result
def is_title_contains(self, title, timeout=10):
"""ๅคๆญtitleๅ
ๅซ"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.title_contains(title))
return result
def is_selected(self, loc, timeout=10):
"""ๅคๆญๅ
็ด ่ขซ่ขซ้ไธญ๏ผ่ฟๅๅธๅฐๅผ"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.element_located_to_be_selected(loc))
return result
def is_selected_be(self, loc, selected=True, timeout=10):
"""
ๅคๆญๅ
็ด ็็ถๆ๏ผselectedๆฏๆๆ็ๅๆฐTrue/False
่ฟๅๅธๅฐๅผ
"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.element_located_selection_state_to_be(loc, selected))
return result
def is_alert_present(self, timeout=10):
"""
ๅคๆญ้กต้ขๆฏๅฆๆalert
ๆ๏ผ่ฟๅalert(ๆณจๆ่ฟ้่ฟๅalert๏ผไธๆฏTrue)
ๆฒกๆ๏ผ่ฟๅFalse
"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.alert_is_present())
return result
def is_visibility(self, loc, timeout=10):
"""ๅ
็ด ๅฏ่ง่ฟๅๆฌ่บซ๏ผไธๅฏ่ง่ฟๅFalse"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.visibility_of_element_located(loc))
return result
def is_invisibility(self, loc, timeout=10):
"""ๅ
็ด ๅฏ่ง่ฟๅๆฌ่บซ๏ผไธๅฏ่ง่ฟๅTrue,ๆฒกๆพๅฐๅ
็ด ไน่ฟๅTrue"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.invisibility_of_element_located(loc))
return result
def is_clickable(self, loc, timeout=10):
"""ๅ
็ด ๅฏไปฅ็นๅป่ฟๅๆฌ่บซ๏ผไธๅฏไปฅ็นๅป่ฟๅFalse"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.element_to_be_clickable(loc))
return result
def is_located(self, loc, timeout=10):
"""ๅคๆญๅ
็ด ๆๆฒกๆ่ขซๅฎไฝๅฐ๏ผๅนถไธๆๅณ็ๅฏ่ง๏ผ๏ผๅฎไฝๅฐ่ฟๅelement๏ผๆฒกๅฎไฝๅฐไฝไผFalse"""
result = WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_element_located(loc))
return result
<file_sep>#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Eric
import unittest
from Librarys.logger import Logger
class basic_unittest(unittest.TestCase):
@classmethod
def setUpClass(cls):
        cls.logger = Logger(logger='TestCase').getlog()
        cls.logger.info('-------- Test started --------')
    @classmethod
    def tearDownClass(cls):
        cls.logger.info('-------- Test finished --------')
<file_sep>#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Eric
import os
import time
import unittest
from Reports.Runner.HTMLTestRunner3_en import HTMLTestRunner
base_path = os.path.abspath('.')
case_path = os.path.join(base_path, 'TestCase')
result_path = os.path.join(base_path, 'Reports')
def create_suite():
"""ๅๅปบๆต่ฏ็จไพ้"""
test_suite = unittest.TestSuite()
# ๆฅๆพๆต่ฏ็จไพ็ฎๅฝไธ็ฌฆๅ่งๅ็็จไพ
discover = unittest.defaultTestLoader.discover(
start_dir=case_path,
pattern='*_case.py',
top_level_dir=None
)
for test_case in discover:
test_suite.addTests(test_case)
return test_suite
def report_info():
    # Get the current system time
now = time.strftime("_%Y-%m-%d_%H_%M_%S")
day = time.strftime("%Y-%m-%d")
    # Define the daily report directory and the report file name
result_paths = os.path.join(result_path, day)
file_name = os.path.join(result_paths, 'Result' + now + '.html')
return result_paths, file_name
def main():
test_suite = create_suite()
result_paths, file_name = report_info()
if not os.path.exists(result_paths):
os.mkdir(result_paths)
with open(file_name, 'wb') as fp:
runner = HTMLTestRunner(
stream=fp,
            title='Automated Test Report',
description=''
)
runner.run(test_suite)
if __name__ == '__main__':
main()
<file_sep>#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Eric
import unittest
from TestCase.basic_unittest import basic_unittest
class DemoCase(basic_unittest):
def setUp(self):
pass
def test_demo(self):
pass
def tearDown(self):
pass
|
6e70cb2784ebaf082d9edac30362e55e72d945a2
|
[
"Python"
] | 5 |
Python
|
Eric130vv/vv_web_framework
|
06413fa93298341e84f39634a7fb49d21f388008
|
a296495a5f617028bc2bae4440b1d623e2718cfa
|
refs/heads/master
|
<file_sep>#coding:utf-8
import utils
import os
from datetime import datetime
from time import time
def run():
if not os.path.exists('bind.log'):
ctime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
timestamp = time()
try:
bind = utils.bind()
except:
bind = 0
with open('bind.log', 'w') as f:
f.write('{}\t{}\t{}\n'.format(ctime, timestamp, bind))
if __name__ == '__main__':
run()
<file_sep>#coding:utf-8
import yaml
import logging
import logging.config
import codecs
import os
PATH = os.path.dirname(__file__)
DEBUG = False
LOC = '/dev/sda1'
logging.config.dictConfig(yaml.load(codecs.open(os.path.join(PATH, 'logging.yaml'), 'r', 'utf-8')))
HOST = 'http://172.16.17.30:8380/stat'
IP = '192.168.127.12'
<file_sep>#coding:utf-8
import os
import settings
import logging
logger = logging.getLogger('bind')
def cpu():
return '%.2f%%' % (float(os.popen("uptime | awk '{print $11}' | sed 's/,//g'").read().strip()) * 100,)
def memory():
total = float(os.popen("free -m | grep Mem | awk '{print $2}'").read().strip())
used = float(os.popen("free -m | grep /cache | awk '{print $3}'").read().strip())
return '%.2f%%' % (used / total * 100,)
def disk(loc):
return os.popen("df | grep %s | awk '{print $5}'| sed 's/G//g'" % (loc,)).read().strip()
def cache():
total = float(os.popen("free -m | grep Mem | awk '{print $2}'").read().strip())
used = float(os.popen("free -m | grep Mem | awk '{print $7}'").read().strip())
return '%.2f%%' % (used / total * 100,)
def bind():
try:
os.popen("rm -f /home/wkubuntu/named/logs/named.stats")
os.popen("/home/wkubuntu/named/sbin/rndc stats")
bind = os.popen("grep 'queries resulted in successful answer' /home/wkubuntu/named/logs/named.stats | awk '{print $1}'").read().strip()
bind = int(bind)
except Exception, e:
logger.error(str(e))
return 0
else:
return bind
<file_sep>#coding:utf-8
import tornado.web
import logging
import sha
import json
from tornado.web import HTTPError
from tornado.options import define, options, parse_command_line
from settings import DEBUG, LOC
from utils import cpu, cache, memory, disk, bind
from datetime import datetime
define('port', default=8380, type=int)
logger = logging.getLogger('monitor')
CACHE = {}
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'^/stat/?$', StatHandler),
]
settings = {
'debug': DEBUG,
'cookie_secret': sha.sha('monitor').hexdigest(),
}
super(Application, self).__init__(handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
def initialize(self):
logger.info('<--- Enter %s --->', self.__class__.__name__)
logger.info('From %s', self.request.remote_ip)
logger.info('%s %s', self.request.method, self.request.uri)
def on_finish(self):
logger.info('<--- Exit %s --->\n', self.__class__.__name__)
def get(self):
raise HTTPError(405)
def post(self):
raise HTTPError(405)
def put(self):
raise HTTPError(405)
def delete(self):
raise HTTPError(405)
def write_json(self, data):
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(data))
self.finish()
def write_error(self, status_code, **kwargs):
logger.info('STATUS_CODE %s', status_code)
exc_info = kwargs.get('exc_info', None)
reason = ''
if exc_info:
logger.error('%s %s', exc_info[0], exc_info[1].message)
reason = exc_info[1].message
self.write_json({'reason': reason, 'code': status_code})
def handle_exception(self, e):
import traceback
traceback.print_exc()
logger.error('Exception: %s', str(e))
self.write_json({'reason': str(e), 'code': 500})
def handle_success(self):
self.write_json({'reason': 'success', 'code': 200})
class StatHandler(BaseHandler):
def get(self):
try:
self.write_json(CACHE)
except Exception, e:
self.handle_exception(e)
def post(self):
try:
data = json.loads(self.get_argument('data'))
ip = data['ip']
if (ip not in CACHE) or (CACHE[ip]['timestamp'] < data['timestamp']):
CACHE[ip] = data
except Exception, e:
logger.info(CACHE)
self.handle_exception(e)
else:
self.handle_success()
def main():
logger.info('start server.\n')
parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
<file_sep>#!/opt/py2/bin/python2.7
#coding:utf-8
import os
import urllib
import urllib2
import utils
import logging
import traceback
import json
from settings import LOC, HOST, IP
from logging import StreamHandler, Formatter
from logging.handlers import TimedRotatingFileHandler
from datetime import datetime
from time import time
logger = logging.getLogger('worker')
def run():
try:
new_bind = utils.bind()
ctime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
timestamp = time()
with open('/home/zhoujingzhong/monitor/bind.log', 'a+') as f:
old_bind = int(f.readlines()[-1].strip().split('\t')[-1])
f.write('{}\t{}\t{}\n'.format(ctime, timestamp, new_bind))
if new_bind >= old_bind:
new_bind -= old_bind
data = {'ip': IP,
'cpu': utils.cpu(),
'memory': utils.memory(),
'disk': utils.disk(LOC),
'cache': utils.cache(),
'bind': new_bind,
'timestamp': timestamp,
'ctime': ctime}
data = {'data': json.dumps(data)}
data = urllib.urlencode(data)
req = urllib2.Request(url=HOST, data=data)
ret = urllib2.urlopen(req)
ret = ret.read()
except Exception, e:
traceback.print_exc()
logger.error(str(e))
else:
logger.info('me = %s ret = %s', IP, ret)
if __name__ == '__main__':
run()
|
083883054d575f7d14472081120d9dc56faa23a3
|
[
"Python"
] | 5 |
Python
|
liqueur/monitor
|
cf6f903aa60550e94ab07bf26679bf4a074e58e2
|
2fcd48609902c2fb07a17522a2c6ee05c1b8f4a7
|
refs/heads/main
|
<file_sep>let arc = require('@architect/functions')
exports.handler = arc.http.async(destroy)
async function destroy (req) {
let data = await arc.tables()
await data.user.delete({userID: req.params.userID})
return {
location: '/'
}
}<file_sep>let arc = require('@architect/functions')
exports.handler = async function http(req) {
let data = await arc.tables()
return data.user.scan({})
}<file_sep># architect-serveless-poc<file_sep>// learn more about queue functions here: https://arc.codes/primitives/queues
exports.handler = async function queue (event) {
console.log(JSON.stringify(event, null, 2))
return
}
|
ea6e4b36b4118cd30b9856e8f5b5a647344d7703
|
[
"JavaScript",
"Markdown"
] | 4 |
JavaScript
|
fcsouza/architect-serveless-poc
|
c513d03d76245904af995413bd52da659998e848
|
b97eb736e9da17bc0afd3a0905b0b4226f2463ad
|
refs/heads/master
|
<file_sep>package com.hisham.strategy;
import java.util.ArrayList;
import java.util.List;
/**
* Created by Hisham on 17/Oct/2018 - 22:00
*/
public class Bill {
private List<LineItem> lineItems = new ArrayList<>();
public void addLineItem(LineItem lineItem) {
lineItems.add(lineItem);
}
public void removeLineItem(LineItem lineItem) {
lineItems.remove(lineItem);
}
private int getSum(){
int sum = 0;
for (LineItem lineItem : lineItems) {
int costInCents = lineItem.getCostInCents();
sum += costInCents;
}
return sum;
}
public void pay(PaymentMethod method){
method.pay(getSum());
}
}
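/*
 * Illustrative use of the Strategy pattern (hypothetical; LineItem construction is not shown here):
 *   Bill bill = new Bill();
 *   bill.addLineItem(someLineItem); // each LineItem reports its cost in cents
 *   bill.pay(amount -> System.out.println("Paid " + amount + " cents")); // PaymentMethod passed as a lambda strategy
 */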
<file_sep>package com.hisham.strategy;
/**
* Created by Hisham on 17/Oct/2018 - 21:50
*/
public interface PaymentMethod {
void pay(int amount);
}
<file_sep>package com.hisham.hishambasicandroidsamples.service_bound;
import android.content.ComponentName;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Bundle;
import android.os.Handler;
import android.os.IBinder;
import android.os.Message;
import android.os.Messenger;
import android.os.RemoteException;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import com.hisham.hishambasicandroidsamples.R;
import static com.hisham.hishambasicandroidsamples.util.Utils.printThreadInfo;
public class BoundedMessengerActivity extends AppCompatActivity {
private static final String TAG = "HishamSample";
private ServiceConnection mConn = new ServiceConnection() {
@Override
public void onServiceConnected(ComponentName componentName, IBinder iBinder) {
messenger = new Messenger(iBinder);
}
@Override
public void onServiceDisconnected(ComponentName componentName) {
Log.d(TAG, "onServiceDisconnected: " + componentName.flattenToShortString());
}
};
private Button btn_date;
private Messenger messenger;
private TextView tv;
private EditText et;
// This class handles the Service response
class ResponseHandler extends Handler {
@Override
public void handleMessage(Message msg) {
int respCode = msg.what;
switch (respCode) {
case MyLocalServiceUsingMessenger.TO_UPPER_CASE_RESPONSE: {
String result = msg.getData().getString("respData");
tv.setText(result);
}
}
}
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.bounded_messenger);
printThreadInfo("New Thread");
Intent intent = new Intent(BoundedMessengerActivity.this, MyLocalServiceUsingMessenger.class);
bindService(intent, mConn, BIND_AUTO_CREATE);
btn_date = findViewById(R.id.btn_date);
tv = findViewById(R.id.tv);
et = findViewById(R.id.et);
btn_date.setOnClickListener(view -> {
printThreadInfo("Button");
String val = et.getText().toString();
Message msg = Message.obtain(null, MyLocalServiceUsingMessenger.TO_UPPER_CASE);
msg.replyTo = new Messenger(new ResponseHandler());
// We pass the value
Bundle b = new Bundle();
b.putString("data", val);
msg.setData(b);
try {
messenger.send(msg);
} catch (RemoteException e) {
e.printStackTrace();
}
//btn_date.setText("" + messenger.getNumber());
// Toast.makeText(BoundedBinderActivity.this,mService.getNumber()+"",Toast.LENGTH_LONG).show();
});
}
}
<file_sep>package com.hisham.hishambasicandroidsamples.handler;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import com.hisham.hishambasicandroidsamples.R;
import java.util.concurrent.Callable;
import static android.os.Looper.prepare;
public class HandlerActivity extends AppCompatActivity {
private static final String TAG = "HandlerSample";
private AdvancedWorker worker;
private Button btn;
private Handler handler = new Handler(Looper.getMainLooper()){
@Override
public void handleMessage(Message msg) {
super.handleMessage(msg);
btn.setText(btn.getText() + " - " + ((CharSequence) msg.obj) + System.getProperty("line.separator"));
}
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_handler);
btn = findViewById(R.id.button);
Toolbar toolbar = findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
btn.setOnClickListener(v -> {
System.exit(1);
});
Runnable runnable = () -> Log.d(TAG, "onCreate: runnable for handlerthread");
Thread mainThread = Looper.getMainLooper().getThread();
Log.d(TAG, "Thread ID: " + mainThread.getId() + " - " + mainThread.getName());
worker = new AdvancedWorker();
worker.addTasks(runnable);
worker.addTasks(() -> {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
Log.d(TAG, "Thread ID: " + Thread.currentThread().getId() + " - " + Thread.currentThread().getName());
Message msg = Message.obtain();
msg.obj = "Task 1 ended";
handler.sendMessage(msg);
}).addTasks(() -> {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
Log.d(TAG, "Thread ID: " + Thread.currentThread().getId() + " - " + Thread.currentThread().getName());
Message msg = Message.obtain();
msg.obj = "Task 2 ended";
handler.sendMessage(msg);
}).addTasks(() -> {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
Log.d(TAG, "Thread ID: " + Thread.currentThread().getId() + " - " + Thread.currentThread().getName());
Message msg = Message.obtain();
msg.obj = "Task 3 ended";
handler.sendMessage(msg);
});
}
@Override
protected void onDestroy() {
super.onDestroy();
worker.quit();
}
}
<file_sep>package com.hisham.hishambasicandroidsamples.handler;
import android.util.Log;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Created by Hisham on 19/Sep/2018 - 20:32
*/
public class SimpleWorker extends Thread {
private static final String TAG = SimpleWorker.class.getSimpleName();
private AtomicBoolean alive = new AtomicBoolean(true);
private ConcurrentLinkedQueue<Runnable> taskQueue = new ConcurrentLinkedQueue<>();
public SimpleWorker() {
super(TAG);
start();
}
@Override
public void run() {
super.run();
while (alive.get()){
Runnable task = taskQueue.poll();
if(task != null)
task.run();
}
Log.d(TAG, "run: Thread terminated");
}
public SimpleWorker addTasks(Runnable r){
taskQueue.add(r);
return this;
}
public void quit(){
alive.compareAndSet(true, false);
}
}
<file_sep>package com.hisham.hishambasicandroidsamples.customview.fanview;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.view.View;
import com.hisham.hishambasicandroidsamples.R;
/**
* Created by Hisham on 14/Oct/2018 - 12:24
*/
public class DialView extends View {
private static final int SELECTION_COUNT = 4; // total default selections
private int indicatorsCount; // indicators provided by user customized
private float width;
private float height;
private Paint textPaint;
private Paint dialPaint;
private float radiusCircle;
private int activeSelection;
private final StringBuffer tempLabel = new StringBuffer(8);
private final float[] tempResult = new float[2];
private int fanOffColor;
private int fanOnColor;
public DialView(Context context) {
super(context);
init(null, 0);
}
public DialView(Context context, @Nullable AttributeSet attrs) {
super(context, attrs);
init(attrs, 0);
}
public DialView(Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
init(attrs, defStyleAttr);
}
private void init(AttributeSet attrs, int defStyleAttr) {
// process attributes
fanOffColor = Color.GRAY; // default fan off color
fanOnColor = Color.GREEN; // default fan on color
if(attrs != null){
TypedArray typedArray = getContext().obtainStyledAttributes(attrs, R.styleable.DialView,
defStyleAttr, 0);
fanOnColor = typedArray.getColor(R.styleable.DialView_fan_on_color, fanOnColor);
fanOffColor = typedArray.getColor(R.styleable.DialView_fan_off_color, fanOffColor);
activeSelection = Math.abs(typedArray.getInt(R.styleable.DialView_default_position_of_fan_indicator, 0) % SELECTION_COUNT);
indicatorsCount = typedArray.getInt(R.styleable.DialView_indicators_count, SELECTION_COUNT);
typedArray.recycle();
}
textPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
textPaint.setColor(Color.BLACK);
textPaint.setStyle(Paint.Style.FILL_AND_STROKE);
textPaint.setTextAlign(Paint.Align.CENTER);
textPaint.setTextSize(40f);
dialPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
setDialPaintColor(activeSelection);
setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
activeSelection = (activeSelection + 1) % indicatorsCount;
setDialPaintColor(activeSelection);
invalidate();
}
});
}
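    /*
     * Illustrative layout usage (attribute values are hypothetical, assuming the res-auto "app" namespace):
     *   <com.hisham.hishambasicandroidsamples.customview.fanview.DialView
     *       android:layout_width="200dp"
     *       android:layout_height="200dp"
     *       app:fan_on_color="@android:color/holo_green_light"
     *       app:fan_off_color="@android:color/darker_gray"
     *       app:indicators_count="4" />
     */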
private void setDialPaintColor(int activeSelection) {
if(activeSelection >= 1){
dialPaint.setColor(fanOnColor);
} else {
dialPaint.setColor(fanOffColor);
}
}
/**
*
* @param position current position being drawn
* @param radius radius
* @param isLabel Tells if text is being drawn.
* @return xy positions
*/
private float[] computeXYForPositionCustomIndicators(final int position, final float radius, boolean isLabel) {
if(indicatorsCount <= 6){
return computeXYForPosition(position, radius);
}
float[] result = tempResult;
Double startAngle = Math.PI * (3 / 2d);
Double angle = startAngle + (position * (Math.PI / indicatorsCount));
result[0] = (float) (radius * Math.cos(angle * 2)) + (width / 2);
result[1] = (float) (radius * Math.sin(angle * 2)) + (height / 2);
if ((angle > Math.toRadians(360)) && isLabel) {
result[1] += 20; // as text will be now drawn below the circle, so adding padding
}
return result;
}
private float[] computeXYForPosition(final int position, final float radius){
float[] result = tempResult;
Double startAngle = Math.PI * (9/8d);
Double angle = startAngle + (position * (Math.PI / 4));
result[0] = (float) (radius * Math.cos(angle)) + (this.width / 2);
result[1] = (float) (radius * Math.sin(angle)) + (this.height / 2);
return result;
}
@Override
protected void onSizeChanged(int w, int h, int oldw, int oldh) {
this.width = w;
this.height = h;
this.radiusCircle = (float) (Math.min(this.width, this.height) / 2 * 0.8);
}
@Override
protected void onDraw(Canvas canvas) {
super.onDraw(canvas);
canvas.drawCircle(this.width/2, this.height /2, radiusCircle, dialPaint); // draw dial
// draw text labels
final float labelRadius = radiusCircle + 20;
StringBuffer label = tempLabel;
for(int i = 0; i < indicatorsCount; i++){
float[] xyData = computeXYForPositionCustomIndicators(i, labelRadius, true);
float x = xyData[0];
float y = xyData[1];
label.setLength(0);
label.append(i);
canvas.drawText(label, 0, label.length(), x, y, textPaint);
}
// draw indicator
final float indicatorRadius = radiusCircle - 35;
float[] xyData = computeXYForPositionCustomIndicators(activeSelection, indicatorRadius, false);
canvas.drawCircle(xyData[0], xyData[1], 20, textPaint);
}
public int getFanOffColor() {
return fanOffColor;
}
public DialView setFanOffColor(int fanOffColor) {
this.fanOffColor = fanOffColor;
setDialPaintColor(activeSelection);
invalidate();
return this;
}
public int getFanOnColor() {
return fanOnColor;
}
public DialView setFanOnColor(int fanOnColor) {
this.fanOnColor = fanOnColor;
setDialPaintColor(activeSelection);
invalidate();
return this;
}
public int getIndicatorsCount() {
return indicatorsCount;
}
public DialView setIndicatorsCount(int indicatorsCount) {
// if(indicatorsCount > 20){
// this.indicatorsCount = 20;
// } else {
this.indicatorsCount = indicatorsCount;
// }
invalidate();
return this;
}
}
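/*
 * Minimal usage sketch (not part of the original sample): it shows how the fluent
 * setters above could be chained when the dial is created programmatically. The
 * host Context is assumed to come from whatever Activity or Fragment embeds the view.
 */
class DialViewUsageSketch {
    static DialView createDemoDial(Context context) {
        DialView dial = new DialView(context);   // programmatic construction, no XML attributes
        dial.setFanOnColor(Color.CYAN)           // chainable setters defined in DialView above
                .setFanOffColor(Color.DKGRAY)
                .setIndicatorsCount(8);
        return dial;
    }
}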
<file_sep>package com.hisham.hishambasicandroidsamples.customview.clearedittext;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import com.hisham.hishambasicandroidsamples.R;
public class ClearEditTextActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_clear_edit_text);
}
}
<file_sep>package com.hisham.javalibrary.lruCacheImpl
import java.util.*
/**
* Created by Hisham on 05/Nov/2018 - 21:30
*/
class MyLruCacheDeque(private val cacheSize: Int) {
data class MyModel (val key: Int, val data: String)
private val deque: Deque<MyModel>
private val map: HashMap<Int, MyModel> = HashMap(cacheSize)
private var hit: Int = 0
private var miss: Int =0
init {
deque = LinkedList()
}
private fun createModel(key: Int): MyModel = MyModel(key, "Value: $key")
fun putItem(key: Int): String {
val value = createModel(key)
if(deque.size >= cacheSize) { // make space
if(map.containsKey(key)){ // hit
if(deque.remove(map[key]))
deque.addFirst(map[key])
} else { // miss
val removeLast = deque.removeLast()
map.remove(removeLast.key)
deque.addFirst(value)
map[key] = value
}
} else {
deque.addFirst(value)
map[key] = value
}
return value.data
}
fun getItem(key: Int): String? {
return if(map.containsKey(key)) {
hit++
map[key]?.data
} else {
miss++
putItem(key)
}
}
fun currentSizeMap(): Int = map.size
fun currentSizeDeque(): Int = deque.size
fun getHitCount(): Int = hit
fun getMissCount(): Int = miss
    fun printCacheDetails() {
        for ((_, data) in deque) {
            println(data)
        }
    }
}<file_sep>package com.hisham.hishambasicandroidsamples.service_bound;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Looper;
import android.util.Log;
import static com.hisham.hishambasicandroidsamples.util.Utils.printThreadInfo;
public class WorkerThread extends HandlerThread {
private static final String TAG = "HishamSample";
private Handler handler;
public WorkerThread() {
super("Worker");
start();
handler = new Handler(getLooper());
printThreadInfo("inside constructor worker");
}
public void addTask(Runnable r){
printThreadInfo("inside worker");
handler.post(r);
}
}
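/*
 * Minimal usage sketch (not in the original sample): tasks posted through addTask()
 * run one after another on the dedicated "Worker" looper thread created above. The
 * tag literal mirrors the TAG constant in this file.
 */
class WorkerThreadUsageSketch {
    static void runSample() {
        WorkerThread worker = new WorkerThread();
        worker.addTask(new Runnable() {
            @Override
            public void run() {
                Log.d("HishamSample", "task executed on the worker looper");
            }
        });
        worker.quitSafely(); // standard HandlerThread call: stop the looper once queued tasks finish
    }
}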
<file_sep>package com.hisham.javalibrary.kotlinBasics.higerOrderFunctions
/**
* Created by Hisham on 07/Nov/2018 - 17:57
*/
class BenchMarkingChild : BenchmarkingParent() {
// fun doSomething(){
// doParent()
// }
}<file_sep>package com.hisham.javalibrary.kotlinBasics.higerOrderFunctions
/**
* Created by Hisham on 07/Nov/2018 - 17:50
*/
object Benchmarking {
@JvmStatic
fun main(args: Array<String>) {
benchmarkingProcess()
BenchMarkingChild().doParent()
}
private fun benchmarkingProcess() {
val benchmark = benchmark {
for (i in 0..10000000000L) Object()
}
println("Executed in : $benchmark ms")
// val time = System.currentTimeMillis()
// for (i in 0..10000000000) {
//// println("Object: ${java.lang.Object()} - $i")
// java.lang.Object()
// }
// println("Total time taken : ${System.currentTimeMillis() - time} ms")
}
private fun benchmark(block: () -> Unit): Long {
val time = System.currentTimeMillis()
block()
return System.currentTimeMillis() - time
}
}<file_sep>package com.hisham.javalibrary.kotlinBasics.higerOrderFunctions
/**
* Created by Hisham on 07/Nov/2018 - 17:58
*/
open class BenchmarkingParent {
fun doParent(){
println("parenting")
}
}<file_sep>package com.hisham.hishambasicandroidsamples.service_bound;
import android.app.Service;
import android.content.Intent;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.os.Message;
import android.os.Process;
import android.os.RemoteException;
import android.support.annotation.Nullable;
import android.util.Log;
import com.hisham.hishambasicandroidsamples.IRemoteService;
import com.hisham.hishambasicandroidsamples.util.Utils;
import java.util.Random;
import static com.hisham.hishambasicandroidsamples.util.Utils.printThreadInfo;
public class MyLocalServiceUsingAIDL extends Service {
private static final String TAG = "HishamSample";
public static final int TO_UPPER_CASE = 1;
public static final int TO_UPPER_CASE_RESPONSE = 2;
private Message initialMsg;
private int variable = 0;
@Override
public void onCreate() {
super.onCreate();
}
// private void replyBack(Message msg) {
// // This is the action
// int msgType = msg.what;
// switch(msgType) {
// case TO_UPPER_CASE: {
// try {
// // Incoming data
// String data = msg.getData().getString("data");
// Message resp = Message.obtain(null, TO_UPPER_CASE_RESPONSE);
// Bundle bundle = new Bundle();
// bundle.putString("respData", data.toUpperCase());
// resp.setData(bundle);
//
// msg.replyTo.send(resp);
// }
// catch (RemoteException e) {
// e.printStackTrace();
// }
// break;
// }
//// default:
//// super.handleMessage(msg);
// }
// }
private IRemoteService.Stub binder = new IRemoteService.Stub() {
@Override
public int getPid() throws RemoteException {
Utils.printThreadInfo("IRemote server: ");
variable++;
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
return Process.myPid();
}
@Override
public long getThreadId() throws RemoteException {
Utils.printThreadInfo("IRemote server: ");
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
return Thread.currentThread().getId();
}
@Override
public String getThreadName() throws RemoteException {
return Thread.currentThread().getName();
}
@Override
public int getVariable() throws RemoteException {
return variable;
}
@Override
public void basicTypes(int anInt, String aString) throws RemoteException {
}
};
@Nullable
@Override
public IBinder onBind(Intent intent) {
printThreadInfo("onBind");
return binder;
// return messenger.getBinder();
}
@Override
public boolean onUnbind(Intent intent) {
return super.onUnbind(intent);
}
private Handler handler = new Handler(Looper.myLooper()) {
@Override
public void handleMessage(Message msg) {
super.handleMessage(msg);
printThreadInfo("handleMessage");
Log.d(TAG, "handleMessage: msg" + msg);
// replyBack(msg);
}
};
private Runnable runnable = () -> {
printThreadInfo("getNumber");
// time consuming task
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
e.printStackTrace();
}
int i = new Random().nextInt();
Log.d(TAG, "getNumber: " + i);
// replyBack(initialMsg);
handler.sendMessage(initialMsg);
};
public void getNumber(Message msg) {
initialMsg = Message.obtain(msg);
WorkerThread thread = new WorkerThread();
thread.addTask(runnable);
}
@Override
public void onDestroy() {
super.onDestroy();
Log.d(TAG, "onDestroy: Service destroyed");
}
}<file_sep>package com.hisham.factory;
import org.apache.commons.codec.digest.DigestUtils;
/**
* Created by Hisham on 17/Oct/2018 - 20:27
*/
public class Sha512Algo implements EncryptionAlgorithm {
@Override
    public String decrypt(String plainText) {
        // Note: despite the method name, this returns a one-way SHA-512 hex digest of the input.
        return DigestUtils.sha512Hex(plainText);
}
}
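/*
 * Minimal usage sketch (not part of the original factory sample): the implementation
 * above is exercised through its EncryptionAlgorithm interface, the way a factory
 * would normally hand it out. Only the decrypt(String) method visible above is used.
 */
class Sha512AlgoUsageSketch {
    static String digestOf(String plainText) {
        EncryptionAlgorithm algorithm = new Sha512Algo();
        return algorithm.decrypt(plainText); // yields the SHA-512 hex digest of the input
    }
}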
<file_sep>package com.hisham.javalibrary.lruCacheImpl
/**
* Created by Hisham on 05/Nov/2018 - 00:15
*/
object LruCacheDriver {
@JvmStatic
fun main(args: Array<String>){
val myLruCache = MyLruCacheDeque(6)
myLruCache.getItem(1)
myLruCache.getItem(2)
myLruCache.getItem(3)
myLruCache.getItem(4)
myLruCache.getItem(5)
myLruCache.getItem(1)
myLruCache.getItem(6)
myLruCache.getItem(7)
myLruCache.getItem(8)
myLruCache.getItem(7)
myLruCache.printCacheDetails()
System.out.println("Current map size: ${myLruCache.currentSizeMap()}" )
System.out.println("Current deque size: ${myLruCache.currentSizeDeque()}" )
myLruCache.getItem(1)
myLruCache.getItem(9)
myLruCache.getItem(10)
// myLruCache.remove(10)
// myLruCache.remove(4)
// myLruCache.remove(8)
// myLruCache.get(1)
// myLruCache.get(10)
// myLruCache.get(15)
// myLruCache.get(10)
// myLruCache.get(12)
// myLruCache.get(18)
// myLruCache.get(13)
myLruCache.printCacheDetails()
System.out.println("Current map size: ${myLruCache.currentSizeMap()}" )
System.out.println("Current deque size: ${myLruCache.currentSizeDeque()}" )
System.out.println("Hits: ${myLruCache.getHitCount()}" )
System.out.println("Misses: ${myLruCache.getMissCount()}" )
}
}<file_sep>package com.hisham.javalibrary.lruCacheImpl
import sun.misc.LRUCache
/**
* Created by Hisham on 04/Nov/2018 - 23:24 - Not written correctly, broken
*/
class MyLruCache(private val cacheSize: Int) {
private data class MyEntry(val key: Int) {
// var key: Int? = null
var value: String? = null
var nextEntry: MyEntry? = null
var previousEntry: MyEntry? = null
override fun toString(): String {
return "MyEntry[ key: $key, $value]"
}
}
private val map: HashMap<Int, MyEntry> = HashMap(cacheSize)
private var front: MyEntry? = null // front of doubly LL
private var rear: MyEntry? = null // rear of doubly LL
fun get(key: Int?): String? {
if (key == null) return null
// if map doesn't contains the key
return if (!map.containsKey(key)) {
val entry = MyEntry(key)
entry.value = getValueFromKey(key)
if(front == null){
front = entry
rear = entry
}
if (map.size >= cacheSize) { // map size need to be trimmed before addition
removeFromKey(rear?.key)
}
moveEntryToTop(entry)
map[key] = entry
// add(key)
null
} else {
// its a hit, move the entry to front
val myEntry = map[key]
val removeEntry = removeFromKey(myEntry?.key)
moveEntryToTop(removeEntry!!)
removeEntry.value
}
}
fun remove(key: Int?): String? {
return removeFromKey(key)?.value
}
private fun removeFromKey(key: Int?): MyEntry? {
if (key == null) return null
if(!map.containsKey(key)) return null
val removeEntry = map[key]
var localFront = front
var localRear = rear
if(removeEntry == localFront && localFront != null){
localFront = localFront.nextEntry
localFront?.previousEntry = null
front = localFront
} else if(removeEntry == localRear && localRear != null){
localRear = localRear.previousEntry
localRear?.nextEntry = null
rear = localRear
} else {
val removePrev = removeEntry?.previousEntry
val removeNext = removeEntry?.nextEntry
removePrev?.nextEntry = removeNext
removeNext?.previousEntry = removePrev
}
map.remove(key)
return removeEntry
}
private fun moveEntryToTop(hitEntry: MyEntry) {
if(hitEntry == front){
// do nothing, item is already on front
return
}
if(hitEntry == rear){
// last entry was hit and it was rear too, move rear to its previous
rear = hitEntry.previousEntry
}
val hitsPrev = hitEntry.previousEntry
val hitsNext = hitEntry.nextEntry
if (hitsPrev != null) hitsPrev.nextEntry = hitsNext
if (hitsNext != null) hitsNext.previousEntry = hitsPrev
// Hit entry is removed from the position and linked list is mended
// Now moving the entry to first position
hitEntry.previousEntry = null
hitEntry.nextEntry = front
front = hitEntry
}
private fun getValueFromKey(key: Int): String {
// value is fetched from some place
return "Value: $key"
}
fun printCacheDetails(){
printEntries(front)
}
private fun printEntries(front: MyEntry?) {
if(front == null) {
return
}
System.out.println("$front")
printEntries(front.nextEntry)
}
}<file_sep>package com.hisham.hishambasicandroidsamples.service_bound;
import android.content.ComponentName;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.Bundle;
import android.os.IBinder;
import android.os.RemoteException;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import com.hisham.hishambasicandroidsamples.IRemoteService;
import com.hisham.hishambasicandroidsamples.R;
import static com.hisham.hishambasicandroidsamples.util.Utils.printThreadInfo;
public class BoundedAIDLActivity extends AppCompatActivity {
private static final String TAG = "HishamSample";
private ServiceConnection mConn = new ServiceConnection() {
@Override
public void onServiceConnected(ComponentName componentName, IBinder iBinder) {
service = IRemoteService.Stub.asInterface(iBinder);
}
@Override
public void onServiceDisconnected(ComponentName componentName) {
Log.d(TAG, "onServiceDisconnected: " + componentName.flattenToShortString());
}
};
private Button btn_date;
private IRemoteService service;
private TextView tv;
private EditText et;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.bounded_messenger);
new Thread() {
@Override
public void run() {
super.run();
printThreadInfo("New Thread");
Intent intent = new Intent(BoundedAIDLActivity.this, MyLocalServiceUsingAIDL.class);
bindService(intent, mConn, BIND_AUTO_CREATE);
}
}.start();
btn_date = findViewById(R.id.btn_date);
tv = findViewById(R.id.tv);
et = findViewById(R.id.et);
btn_date.setOnClickListener(view -> {
printThreadInfo("Button");
tv.setText(tv.getText().toString() + System.getProperty("line.separator"));
new Thread() {
@Override
public void run() {
printThreadInfo("T: ");
super.run();
int pid = 0;
long threadID = 0;
try {
pid = service.getPid();
threadID = service.getThreadId();
} catch (RemoteException e) {
e.printStackTrace();
}
int finalPid = pid;
long finalThreadID = threadID;
runOnUiThread(new Runnable() {
@Override
public void run() {
try {
tv.setText("Process ID: " + finalPid + "Thread: " + finalThreadID + " - " + service.getThreadName() + " Counter: " + service.getVariable() + System.getProperty("line.separator"));
} catch (RemoteException e) {
e.printStackTrace();
}
}
});
}
}.start();
            // Spawn six more worker threads that make the same blocking AIDL calls concurrently,
            // so the service's counter and thread handling can be observed under parallel load.
            for (int i = 0; i < 6; i++) {
                new Thread() {
                    @Override
                    public void run() {
                        printThreadInfo("T: ");
                        super.run();
                        int pid = 0;
                        long threadID = 0;
                        try {
                            pid = service.getPid();
                            threadID = service.getThreadId();
                        } catch (RemoteException e) {
                            e.printStackTrace();
                        }
                        int finalPid = pid;
                        long finalThreadID = threadID;
                        runOnUiThread(new Runnable() {
                            @Override
                            public void run() {
                                // tv.setText(tv.getText().toString() + " ---- Process ID: " + finalPid + " -- Thread: " + finalThreadID);
                            }
                        });
                    }
                }.start();
            }
//btn_date.setText("" + messenger.getNumber());
// Toast.makeText(BoundedBinderActivity.this,mService.getNumber()+"",Toast.LENGTH_LONG).show();
});
}
}
<file_sep>package com.hisham.hishambasicandroidsamples.widgetz;
import android.app.PendingIntent;
import android.appwidget.AppWidgetManager;
import android.appwidget.AppWidgetProvider;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.widget.RemoteViews;
import android.widget.Toast;
import com.hisham.hishambasicandroidsamples.R;
import java.text.DateFormat;
import java.util.Date;
/**
* Implementation of App Widget functionality.
* App Widget Configuration implemented in {@link NewAppWidgetConfigureActivity NewAppWidgetConfigureActivity}
*/
public class NewAppWidget extends AppWidgetProvider {
private static final String COUNT_KEY = "count";
private static final String mSharedPrefFile = "com.example.android.appwidgetsample";
static void updateAppWidget(Context context, AppWidgetManager appWidgetManager,
int appWidgetId) {
SharedPreferences prefs = context.getSharedPreferences(
mSharedPrefFile, 0);
int count = prefs.getInt(COUNT_KEY + appWidgetId, 0);
count++;
String dateString = DateFormat.getTimeInstance(DateFormat.SHORT).format(new Date());
CharSequence widgetText = NewAppWidgetConfigureActivity.loadTitlePref(context, appWidgetId);
// Construct the RemoteViews object
RemoteViews views = new RemoteViews(context.getPackageName(), R.layout.new_app_widget);
views.setTextViewText(R.id.appwidget_text, widgetText);
views.setTextViewText(R.id.appwidget_id, String.valueOf(appWidgetId));
views.setTextViewText(R.id.appwidget_update, context.getResources().getString(R.string.date_count_format, count, dateString));
Intent btnUpdateIntent = new Intent(context, NewAppWidget.class);
btnUpdateIntent.setAction(AppWidgetManager.ACTION_APPWIDGET_UPDATE);
btnUpdateIntent.putExtra(AppWidgetManager.EXTRA_APPWIDGET_IDS, new int[]{appWidgetId});
PendingIntent pendingUpdate = PendingIntent.getBroadcast(context, appWidgetId, btnUpdateIntent,
PendingIntent.FLAG_UPDATE_CURRENT);
views.setOnClickPendingIntent(R.id.button_update, pendingUpdate);
Intent intent = new Intent(context, NewAppWidgetConfigureActivity.class);
PendingIntent pendingIntent = PendingIntent.getActivity(context, 0, intent, PendingIntent.FLAG_UPDATE_CURRENT);
views.setOnClickPendingIntent(R.id.appwidget_text, pendingIntent);
SharedPreferences.Editor prefEditor = prefs.edit();
prefEditor.putInt(COUNT_KEY + appWidgetId, count);
prefEditor.apply();
// Instruct the widget manager to update the widget
appWidgetManager.updateAppWidget(appWidgetId, views);
}
@Override
public void onUpdate(Context context, AppWidgetManager appWidgetManager, int[] appWidgetIds) {
// There may be multiple widgets active, so update all of them
for (int appWidgetId : appWidgetIds) {
updateAppWidget(context, appWidgetManager, appWidgetId);
}
}
@Override
public void onDeleted(Context context, int[] appWidgetIds) {
// When the user deletes the widget, delete the preference associated with it.
for (int appWidgetId : appWidgetIds) {
NewAppWidgetConfigureActivity.deleteTitlePref(context, appWidgetId);
Toast.makeText(context, "onDeleted called in widget", Toast.LENGTH_SHORT).show();
}
}
@Override
public void onEnabled(Context context) {
// Enter relevant functionality for when the first widget is created
Toast.makeText(context, "onEnabled called in widget", Toast.LENGTH_SHORT).show();
}
@Override
public void onDisabled(Context context) {
// Enter relevant functionality for when the last widget is disabled
Toast.makeText(context, "onDisabled called in widget", Toast.LENGTH_SHORT).show();
}
}
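/*
 * Minimal refresh sketch (not part of the original widget sample): it shows how the
 * static updateAppWidget() above could be driven for every placed instance of this
 * widget from ordinary app code, for example after the underlying data changes.
 */
class NewAppWidgetRefreshSketch {
    static void refreshAll(Context context) {
        AppWidgetManager manager = AppWidgetManager.getInstance(context);
        int[] appWidgetIds = manager.getAppWidgetIds(
                new android.content.ComponentName(context, NewAppWidget.class)); // every instance on the home screen
        for (int appWidgetId : appWidgetIds) {
            NewAppWidget.updateAppWidget(context, manager, appWidgetId);
        }
    }
}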
<file_sep>package com.hisham.javalibrary.producer_consumer;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* Created by Hisham on 23/Oct/2018 - 18:33
*/
public class ProducerConsumerMain {
private int producerId = 0;
private int limit = 5;
private final Queue<Item> itemQueue = new ArrayDeque<>();
private Consumer consumer;
private Producer producer;
public static void main(String args[]) {
new ProducerConsumerMain().pc();
// while (true){
// System.out.println("Active threads: " + Thread.activeCount());
// }
}
private static final class Lock {
private volatile String lockedBy = "tt";
public synchronized String getLockedBy() {
return lockedBy;
}
public synchronized Lock setLockedBy(String lockedBy) {
this.lockedBy = lockedBy;
return this;
}
}
// private final Lock lock = new ReentrantLock();
private final Lock lock = new Lock();
private void pc() {
producer = new Producer() {
@Override
public void produce() throws InterruptedException {
synchronized (lock) {
// if (lock.tryLock()) {
// try {
printThreadInfo(Thread.currentThread());
while (itemQueue.size() >= limit || lock.getLockedBy().equalsIgnoreCase("producer")) {
System.out.println(Thread.currentThread().getName() + " waiting.");
lock.setLockedBy("producer").wait();
}
lock.setLockedBy("none");
System.out.println(Thread.currentThread().getName() + " started execution.");
Item item = new Item(producerId++);
itemQueue.add(item);
System.out.println("Item produced: " + producerId + " - Q Size: " + itemQueue.size());
lock.notifyAll();
// } finally {
// lock.unlock();
// }
}
}
};
consumer = new Consumer() {
@Override
public void consume() throws InterruptedException {
synchronized (lock) {
// if (lock.tryLock()) {
// try {
printThreadInfo(Thread.currentThread());
while (itemQueue.size() <= 0 || lock.getLockedBy().equalsIgnoreCase("consumer")) {
System.out.println(Thread.currentThread().getName() + " waiting.");
lock.setLockedBy("consumer").wait();
}
lock.setLockedBy("none");
System.out.println(Thread.currentThread().getName() + " started execution.");
Item poll = itemQueue.poll();
lock.notifyAll();
if (poll != null)
System.out.println("Item consumed: " + poll.getId() + " Q Size: " + itemQueue.size());
else
System.out.println("Item is " + poll);
// } finally {
// lock.unlock();
// }
}
}
};
new Thread("Consumer") {
@Override
public void run() {
while (true) {
try {
// sleep();
consumer.consume();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}.start();
new Thread("Consumer2") {
@Override
public void run() {
while (true) {
try {
// sleep(100);
consumer.consume();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}.start();
// new Thread("Consumer3") {
// @Override
// public void run() {
// while (true) {
// try {
//// sleep(100);
// consumer.consume();
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
// }
// }
// }.start();
new Thread("Producer") {
@Override
public void run() {
while (true) {
try {
// sleep(100);
producer.produce();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}.start();
new Thread("Producer2"){
@Override
public void run() {
while (true) {
try {
// sleep(57);
producer.produce();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}.start();
}
private void printThreadInfo(Thread thread) {
long threadId = thread.getId();
String threadName = thread.getName();
System.out.println("Thread info: " + threadName + "-" + threadId);
}
}
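/*
 * Alternative sketch (not in the original demo): the same bounded producer/consumer
 * hand-off expressed with java.util.concurrent.BlockingQueue, which performs the
 * wait/notify bookkeeping that the code above does by hand. It reuses the Item
 * class from this package.
 */
class BlockingQueueSketch {
    static void runSample() throws InterruptedException {
        java.util.concurrent.BlockingQueue<Item> queue =
                new java.util.concurrent.ArrayBlockingQueue<>(5); // same capacity as the limit above
        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 10; i++) {
                    queue.put(new Item(i)); // blocks while the queue is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "Producer");
        Thread consumer = new Thread(() -> {
            try {
                for (int i = 0; i < 10; i++) {
                    Item item = queue.take(); // blocks while the queue is empty
                    System.out.println("Item consumed: " + item.getId());
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "Consumer");
        producer.start();
        consumer.start();
        producer.join();
        consumer.join();
    }
}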
<file_sep>package com.hisham.javalibrary.kotlinBasics.letItRunWithAlsoKeywords
import java.awt.Color
import java.awt.Color.RED
/**
* Created by Hisham on 07/Nov/2018 - 22:58
*/
object KeywordsTest {
@JvmStatic
fun main(args: Array<String>) {
val str = "Hello world"
val length = str.let {
println(it)
it.length
}
val a = 1
val b = 2
val x = a.let { it + 2 }.let { val i = it + b; i }
println(x) //5
var y = "Data"
        y.let { outer -> (outer + "Hi").let { inner -> println("Inner is $inner and outer is $outer") } }
var name: String? = "Kotlin let null check"
name?.let { println(it) } //prints Kotlin let null check
var name2 = name?.let { "Hi $it" }
name = null
name?.let { println(it) } //nothing happens
println(name2)
var p: String? = null
val any: String? = p?.let {
println("p is $p")
it
} ?: run {
println("p was null. Setting default value to: ")
p = "Kotlin"
p
}
println(p)
println(any)
//Prints
//p was null. Setting default value to:
//Kotlin
data class Person(var name: String, var tutorial : String)
var person = Person("Anupam", "Kotlin")
val apply = person.apply {
this.tutorial = "Swift"
}
println(person)
val apple: Apple = Apple(RED)
val redApple = apple.takeIf { it.color == RED }
val otherApple = apple.takeUnless { it.color == RED }
println(redApple)
println(otherApple)
fun task(): List<Boolean> {
val isEven: Int.() -> Boolean = { this % 2 == 0 }
val isOdd: Int.() -> Boolean = { this % 2 != 0 }
return listOf(42.isOdd(), 239.isOdd(), 294823098.isEven())
}
}
data class Apple(var color: Color)
}<file_sep>package com.hisham.javalibrary.hashcode_equals_hashcollections;
/**
* Created by Hisham on 29/Oct/2018 - 01:37
*/
public class PojoObject {
private int importantField;
private String otherField;
public PojoObject(int importantField, String otherField) {
this.importantField = importantField;
this.otherField = otherField;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PojoObject that = (PojoObject) o;
if (importantField != that.importantField) return false;
return true;//otherField != null ? otherField.equals(that.otherField) : that.otherField == null;
}
    // hashCode() is intentionally left commented out: with equals() overridden but the default
    // identity hashCode(), "equal" objects can land in different buckets of hash-based collections,
    // which is the behaviour this sample demonstrates.
    // @Override
    // public int hashCode() {
    //     return importantField;
    // }
}
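/*
 * Minimal sketch (not in the original file) showing the pitfall this sample is built
 * around: with equals() overridden but hashCode() left at the Object default, two
 * "equal" instances usually hash to different buckets, so a HashSet keeps both.
 */
class PojoObjectHashSketch {
    static int distinctCount() {
        java.util.Set<PojoObject> set = new java.util.HashSet<>();
        set.add(new PojoObject(1, "first"));
        set.add(new PojoObject(1, "second")); // equals() says duplicate, default hashCode() disagrees
        return set.size(); // typically 2 here; a consistent hashCode() would make it 1
    }
}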
<file_sep>package com.hisham.javalibrary;
import com.hisham.javalibrary.dst.LongestPalinSubstring;
import com.hisham.javalibrary.threading.ThreadSample;
import kotlin.Pair;
public class MyClass {
private static final ThreadSample sample = new ThreadSample("Sample1");
void test() throws InterruptedException {
ThreadSample sample2 = new ThreadSample("Sample2");
sample.start(); // main thread
sample.setSample(sample2);
sample2.start();
sample2.setSample(sample2);
// sample.setMyClass(this);
// synchronized (synchronizedsample2) {
sample2.wait(); // main thread
// }thread
System.out.println("Main thread execution finished");
Pair<Integer, Integer> integer;
}
public static void main(String args[]) throws InterruptedException {
// System.out.println("Hello Android from Java");
// MyClass myClass = new MyClass();
// myClass.test();
// ========== Executors
// ExecutorService executor = Executors.newFixedThreadPool(5);
// for (int i = 0; i < 10; i++) {
// Runnable worker = new WorkerThread("" + i);
// executor.execute(worker);//calling execute method of ExecutorService
// }
// executor.shutdown();
// while (!executor.isTerminated()) { }
//
// System.out.println("Finished all threads");
// ============ Deadlock
// DeadlockExample.test();
// ============== InetAddress
// try {
// System.out.print(InetAddress.getByName("www.google.com").getHostAddress());
// } catch (UnknownHostException e) {
// e.printStackTrace();
// }
String str = "forgeeksskeegfor";
// String str = "abaxabaxabb";
System.out.println("Length is: " + LongestPalinSubstring.longestPalSubstr(str));
}
}
<file_sep>include ':app', ':hishamclient', ':JavaLibrary', ':passwordedittext', ':factory'
|
e697204776589f160ad03848ac5dd104f4b8d8f7
|
[
"Java",
"Kotlin",
"Gradle"
] | 23 |
Java
|
hishamMuneer/HishamBasicAndroidSamples
|
d2fbd6dea3c8f9563b589d86aefb6d6f7d6eba09
|
f44245e817d35ad53377730149ed477658bbdb13
|
refs/heads/main
|
<repo_name>Kevin0309-Anthony/BD<file_sep>/SQLQuery1.sql
select nombre, email from Proveedores where nombre like '%Flores%'
insert into Proveedores (codigo,nombre,nombreEmpresa,pais,telefono,email) values ('003','<NAME>','La curacao','El Salvador','2428-3033','<EMAIL>')
delete from Proveedores where idProveedores=23
update Proveedores set codigo='003', nombre='<NAME>', nombreEmpresa='Omnisport', pais='Noruega', telefono='8228-2828', email='<EMAIL>' where idProveedores=11
update Proveedores set codigo='003', nombre='<NAME>', nombreEmpresa='OLX', pais='Italia', telefono='2628-8281', email='<EMAIL>' where idProveedores=12
|
cdc42b75a49cfa99c4ac7b75578f9d4375cf68e9
|
[
"SQL"
] | 1 |
SQL
|
Kevin0309-Anthony/BD
|
e36e61521f888c096ec3f608343140b28b5b13b6
|
2f3a8a5b37185284bb3990391f58d1ef1e24c93a
|
refs/heads/master
|
<file_sep>from reportlab.pdfgen import canvas
from PIL import Image
import barcode
from barcode.writer import ImageWriter
from flask import Flask, request
from flask_cors import CORS
import cups
conn = cups.Connection()
printers = conn.getPrinters()
printer_name = list(printers.keys())[0]  # dict views are not indexable on Python 3
app = Flask(__name__)
CORS(app)
def print_file(printer_name):
conn.printFile(printer_name, "ting.pdf", "Roman Kiosk Server Print", {})
@app.route("/", methods=["GET", "POST"])
def foo():
jsonified = request.json
jsonified["total"] = "$" + str(jsonified["total"])
ordered_items = []
for key in jsonified:
if jsonified[key] != 0:
ordered_items.append(key + ": " + str(jsonified[key]))
c = canvas.Canvas("ting.pdf", pagesize=(288, 144))
c.setFont("Helvetica", 10, leading=None)
c.drawCentredString(144, 117, "ROMAN KIOSK MOBILE ORDER")
c.setFont("Helvetica", 5, leading=None)
for i in range(len(ordered_items)):
        xcoord = 17
        ycoord = (117 - 5) - ((10 * (i + 1)))
        c.drawString(xcoord, ycoord, ordered_items[i])
code39 = barcode.get_barcode_class('code39')
encoded_name = code39("boi", writer=ImageWriter())
encoded_name.save("new_file")
barcode_png = Image.open("new_file.png")
cropped_barcode = barcode_png.crop((0, 0, barcode_png.size[0], barcode_png.size[1]*.7))
    cropped_barcode.thumbnail((72, 72))  # Pillow's thumbnail() resizes in place and returns None
    cropped_barcode.save("resized_new_file.png")
c.drawImage("resized_new_file.png", 108, 0, width=None, height=None)
c.save()
print_file(printer_name)
return "hello"
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
efd8a5d642c1bc508b0e68c25e6f690ba552237d
|
[
"Python"
] | 1 |
Python
|
latincsclub/rasPi_print_module
|
97c0f6b963c0a387953f45a222c04df8f73f7e6c
|
0932314862660b89783cd7763a35e9c9e9f61934
|
refs/heads/master
|
<repo_name>naomistnl/Calculator<file_sep>/CalcTwo.java
import java.util.Scanner;
public class CalcTwo {
public static void main(String[] args) {
int num1;
int num2;
int num3;
Scanner input = new Scanner(System.in);
System.out.println("Enter your first number ");
num1 = input.nextInt();
System.out.println("Enter your second number ");
num2 = input.nextInt();
System.out.println("Enter your third number ");
num3 = input.nextInt();
if (num1 + num2 == num3) {
System.out.println(num1 + " + " + num2 + " = " + num3);
} else {
System.out.println(num1 + " + " + num2 + " is not " + num3 + " Try again!");
}
if (num1 - num2 == num3) {
            System.out.println(num1 + " - " + num2 + " = " + num3);
} else {
System.out.println(num1 + " - " + num2 + " is not " + num3 + " Try again!");
}
if (num1 * num2 == num3) {
System.out.println(num1 + " * " + num2 + " = " + num3);
} else {
System.out.println(num1 + " * " + num2 + " is not " + num3 + " Try again!");
}
        if (num2 != 0 && num1 / num2 == num3) { // guard against dividing by zero
System.out.println(num1 + " / " + num2 + " = " + num3);
} else {
System.out.println(num1 + " / " + num2 + " is not " + num3 + " Try again!");
}
}
}
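/*
 * Minimal refactoring sketch (not in the original file): the four comparisons in
 * main() follow one pattern, so they can be expressed once. The operator character
 * is only used for the message; division additionally guards against a zero divisor.
 */
class CalcTwoCheckSketch {
    static String check(int a, int b, int expected, char op) {
        Integer actual;
        switch (op) {
            case '+': actual = a + b; break;
            case '-': actual = a - b; break;
            case '*': actual = a * b; break;
            case '/': actual = (b != 0) ? a / b : null; break; // avoid ArithmeticException on b == 0
            default:  actual = null; break;
        }
        if (actual != null && actual == expected) {
            return a + " " + op + " " + b + " = " + expected;
        }
        return a + " " + op + " " + b + " is not " + expected + " Try again!";
    }
}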
|
17f8ad524db4306dcbecddb24ae2f07279ee5711
|
[
"Java"
] | 1 |
Java
|
naomistnl/Calculator
|
b989b71ea98f69ff313c5e232b40f61c9e6e6413
|
482937fb8c5c945165735b2a9fc043d64bd61ceb
|
refs/heads/master
|
<file_sep>/************************************************************************\
* Description: Dynamic way of creating device file
* Author : <NAME>
* File name : lab5_chr_dev_udev.c *
* compilation: use our qc script *
* ./qc lab5_chr_dev_udev *
* insmod lab5_chr_dev_udev.ko *
* *
* Invoking driver with our test_application.c *
* ./test-appl
************************************************************************/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#define CHAR_DEV_NAME "skdevice"
#define MAX_LENGTH 2000
#define SUCCESS 0
static char *kbuf;
struct cdev *sk_cdev;
dev_t mydev;
static int count=1;
static atomic_t device_available =ATOMIC_INIT(1);
static struct class *sk_class;
/* int (*open) (struct inode *, struct file *); */
static int sk_open(struct inode *inode, struct file *file)
{
if(! atomic_dec_and_test(&device_available) )
{
atomic_inc(&device_available);
return -EBUSY; /* already open */
}
printk (KERN_INFO "ref=%d\n", module_refcount(THIS_MODULE));
return SUCCESS;
}
/*
* int (*release) (struct inode *, struct file *);
*/
static int sk_release(struct inode *inode, struct file *file)
{
atomic_inc(&device_available);
return SUCCESS;
}
/*
*ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
*/
static ssize_t sk_read(struct file *file, char *ubuf, size_t usize, loff_t *ppos)
{
int maxbytes; /* number of bytes from ppos to MAX_LENGTH */
int bytes_to_do; /* number of bytes to read */
int nbytes; /* number of bytes actually read */
maxbytes = MAX_LENGTH - *ppos;
if( maxbytes > usize ) bytes_to_do = usize;
else bytes_to_do = maxbytes;
if( bytes_to_do == 0 ) {
printk("Reached end of device\n");
		return 0;	/* returning 0 from a read handler signals EOF to user space */
}
nbytes = bytes_to_do -
copy_to_user( ubuf, /* to */
kbuf + *ppos, /* from */
bytes_to_do ); /* how many bytes */
*ppos += nbytes;
return nbytes;
}
/*
*ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
*/
static ssize_t sk_write(struct file *file, const char *ubuf, size_t usize, loff_t *ppos)
{
int nbytes; /* Number of bytes written */
int bytes_to_do; /* Number of bytes to write */
int maxbytes; /* Maximum number of bytes that can be written */
	printk("\nwrite request of %zu bytes\n", usize); /* ubuf is a user-space pointer and must not be dereferenced directly */
//pr_info("writecall : %d\n " , *ppos);
maxbytes = MAX_LENGTH - *ppos;
if( maxbytes > usize ) bytes_to_do = usize;
else bytes_to_do = maxbytes;
if( bytes_to_do == 0 ) {
printk("Reached end of device\n");
		return -ENOSPC;	/* no space left on the device buffer */
}
nbytes = bytes_to_do -
copy_from_user( kbuf + *ppos, /* to */
ubuf, /* from */
bytes_to_do ); /* how many bytes */
*ppos += nbytes;
return nbytes;
}
static loff_t sk_lseek (struct file *file, loff_t offset, int whence)
{
loff_t tp;
switch (whence) {
case 0: /* SEEK_SET */
tp = offset;
break;
case 1: /* SEEK_CUR */
tp = file->f_pos + offset;
break;
case 2: /* SEEK_END */
tp = MAX_LENGTH + offset;
break;
default:
return -EINVAL;
}
tp = tp < MAX_LENGTH ? tp : MAX_LENGTH;
tp = tp >= 0 ? tp : 0;
file->f_pos = tp;
return tp;
}
static struct file_operations sk_fops = {
.owner = THIS_MODULE,
.read = sk_read,
.write = sk_write,
.open = sk_open,
.release = sk_release,
.llseek = sk_lseek
};
static __init int sk_init(void)
{
int ret;
/*int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
* const char *name)
* */
if (alloc_chrdev_region (&mydev, 0, count, CHAR_DEV_NAME) < 0) {
printk (KERN_ERR "failed to reserve major/minor range\n");
return -1;
}
if (!(sk_cdev = cdev_alloc ())) {
printk (KERN_ERR "cdev_alloc() failed\n");
unregister_chrdev_region (mydev, count);
return -1;
}
cdev_init(sk_cdev,&sk_fops);
ret=cdev_add(sk_cdev,mydev,count);
if( ret < 0 ) {
printk(KERN_INFO "Error registering device driver\n");
cdev_del (sk_cdev);
unregister_chrdev_region (mydev, count);
return -1;
}
/* #define class_create(owner, name) */
sk_class = class_create (THIS_MODULE, "MYVIRTUAL");
/*
*struct device *device_create(struct class *class, struct device *parent,
* dev_t devt, void *drvdata, const char *fmt, ...)
*/
device_create (sk_class, NULL, mydev, NULL, "%s", "skdevice");
printk(KERN_INFO"\nDevice Registered: %s\n",CHAR_DEV_NAME);
printk (KERN_INFO "Major number = %d, Minor number = %d\n", MAJOR(mydev),MINOR(mydev));
kbuf =(char *)kzalloc(MAX_LENGTH,GFP_KERNEL);
return 0;
}
static __exit void sk_exit(void)
{
device_destroy (sk_class, mydev);
class_destroy (sk_class);
cdev_del(sk_cdev);
unregister_chrdev_region(mydev,1);
kfree(kbuf);
printk(KERN_INFO "\n Driver unregistered \n");
}
module_init(sk_init);
module_exit(sk_exit);
MODULE_AUTHOR("SATEESHKG");
MODULE_LICENSE("GPL");
<file_sep>/************************************************************************\
* file name : appl.c *
* Description : Application test basic character driver *
* AUthor : <NAME> *
\************************************************************************/
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
int main()
{
int fd;
char buf[100];
int n =0;
printf("%d\n", getpid());
if(( fd = open("/dev/skdevice", O_RDWR) ) < 0)
perror("open:");
getchar();
printf("\n Now calling read first time \n");
while((n=read(fd, buf, 15)) > 0 )
{
write(STDOUT_FILENO,buf, n);
printf("\n");
}
printf("\n");
printf("Enter data to write to driver: ");
scanf("%[^\n]", buf);
n = strlen(buf);
write(fd, buf, n);
printf("\n Now calling read second time \n");
while((n=read(fd, buf, 15)) > 0 )
{
write(STDOUT_FILENO,buf, n);
printf("\n");
}
close(fd);
return 0;
}
<file_sep>/*********************************************************************\
* Filename: lab1_chr.c
*
* Description: character driver demo 1
* programmer: <NAME>
* date begun: 04 DEC 2017
\*********************************************************************/
#include <linux/module.h>	// for init_module()
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/version.h>
char modname[] = "lab1_chr";
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <asm/types.h>
#define MAJORNO 300
#define MINORNO 0
#define CHAR_DEV_NAME "skdevice"
dev_t mydev;
struct cdev *sk_cdev;
int count = 1;
/*int (*open) (struct inode *, struct file *); */
static int sk_open(struct inode *inode, struct file *filp)
{
pr_info("open called\n");
return 0;
}
/* int (*release) (struct inode *, struct file *); */
static int sk_close(struct inode *inode, struct file *filp)
{
pr_info("Close called \n");
return 0;
}
/* ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); */
static ssize_t sk_read(struct file *filp, char __user * buf,
size_t sz, loff_t * ppos)
{
pr_info("Read called\n");
return 0;
}
/* ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); */
static ssize_t sk_write(struct file *filp, const char *buf, size_t sz,
loff_t * ppos)
{
pr_info("Write called\n");
return 0;
}
static struct file_operations sk_fops = {
.owner = THIS_MODULE,
.open = sk_open,
.release = sk_close,
.read = sk_read,
.write = sk_write
};
static int __init char_sk_init(void)
{
int ret;
pr_info("Installing \'%s\' module\n", modname);
mydev = MKDEV(MAJORNO, MINORNO);
register_chrdev_region(mydev, count, CHAR_DEV_NAME);
sk_cdev = cdev_alloc();
cdev_init(sk_cdev, &sk_fops);
ret = cdev_add(sk_cdev, mydev, count);
if (ret < 0) {
printk("Error registring device driver\n");
return ret;
}
printk(KERN_INFO "Device Registered : %s \n", CHAR_DEV_NAME);
return 0;
}
static void __exit char_sk_exit(void)
{
cdev_del(sk_cdev);
unregister_chrdev_region(mydev, count);
pr_info("Removing \'%s\' module\n", modname);
}
module_init(char_sk_init);
module_exit(char_sk_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("<NAME> ");
MODULE_DESCRIPTION("Details: character driver demo");
<file_sep>/*********************************************************************\
* Filename: lab1_chr.c
*
* Description: character driver demo 2
* programmer: <NAME>
* date begun: 04 DEC 2017
\*********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#define CHAR_DEV_NAME "skdevice"
#define MAX_LENGTH 4000
#define SUCCESS 0
static char kbuf[MAX_LENGTH]="ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static int inuse=0;
static int major;
static int balance;
/* int (*open) (struct inode *, struct file *); */
static int sk_open(struct inode *inode, struct file *filp)
{
if(inuse)
{
printk(KERN_INFO "Device busy %s\n",CHAR_DEV_NAME);
return -EBUSY;
}
inuse=1;
printk(KERN_INFO "Open invoked\n");
balance = strlen(kbuf);
printk("in OPEN balance = %d\n",balance);
return SUCCESS;
}
/* int (*release) (struct inode *, struct file *); */
static int sk_close(struct inode *inode, struct file *filp)
{
inuse = 0;
return SUCCESS;
}
/* ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); */
static ssize_t sk_read(struct file *filp, char __user *buf,
size_t sz, loff_t *fpos)
{
int nbytes,tmp;
printk("in READ balance = %d\n",balance);
if(balance == 0)
return 0;
if( balance <= sz )
{
nbytes = copy_to_user(buf, kbuf + *fpos, balance);
printk("nbytes = %d\n", nbytes);
tmp=balance;
balance = 0;
*fpos =0 ;
printk(" < balance = %d\n",balance);
return tmp;
} else {
nbytes=copy_to_user(buf,kbuf +*fpos,sz);
balance -= sz;
*fpos += sz;
printk(">balance = %d\n",balance);
return sz;
}
}
/* ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); */
static ssize_t sk_write(struct file *filp, const char *buf,
size_t sz, loff_t *fpos)
{
int nbytes=0;
printk(KERN_INFO "\nRecevice data from app %s, nbytes=%d\n", buf, (int)sz);
if( sz < MAX_LENGTH)
{
memset(kbuf, '\0', MAX_LENGTH);
nbytes = copy_from_user(kbuf+(*fpos), buf, (size_t) sz);
printk("write: nbytes = %d\n", nbytes);
balance = sz;
return sz;
}
else
{
printk(KERN_INFO "Too large write buf, overflow\n");
return -1;
}
}
static struct file_operations sk_fops = {
.owner = THIS_MODULE,
.open = sk_open,
.release= sk_close,
.read = sk_read,
.write = sk_write
};
static int __init char_sk_init(void)
{
/* static inline int register_chrdev(unsigned int major, const char *name,
* const struct file_operations *fops) */
major = register_chrdev(0,"skdevice", &sk_fops);
if(major < 0)
{
printk(KERN_ALERT "Device registration failed\n");
return -ENOMEM;
}
printk(KERN_INFO "Major no: %d DevName: %s\n",major,CHAR_DEV_NAME);
return SUCCESS;
}
static void __exit char_sk_exit(void)
{
unregister_chrdev(major,"skdevice");
printk(KERN_INFO "char_dev unloaded\n");
}
module_init(char_sk_init);
module_exit(char_sk_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("<NAME>");
MODULE_DESCRIPTION("Details: character driver demo 2");
|
00a6c01a0a48fdcef9d7cfa0327ea337d484035f
|
[
"C"
] | 4 |
C
|
decasat/Character-Device-Driver
|
5f9c9feb06173ac5a4a25bf9b97277d5163b1933
|
323d4bb9ee6819017e9cdc573f2baea6b0fd90b9
|
refs/heads/master
|
<repo_name>zheindel/culligan-diy<file_sep>/_build/js/insight-subnav.js
//This javascript implements some interactive features for sub navs
//Depends on jQuery.
//table lists
$(function(){
//$('.table-list').each(function(){
// initTableList($(this));
//});
});
var n = 0;
function initTableList($tableList){
//Grab the localized "next" text
var nextText = $tableList.data('more-localized') || "More";
var backText = $tableList.data('back-localized') || "Back";
//Let's create a temporary array to store our overflow items in.
var screenWidth = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
$tableList.data('lastKnownWidth', screenWidth);
processTableList($tableList);
//Add navigation navigtaion
if($tableList.find('.subnav_navigation').length==0)
{
		$('<ul class="subnav_navigation">'+
				'<li class="subnav_item table-list-back">' +
					'<span class="colorbar"></span>'+
					'<a href="#" class=""><span class="ion-arrow-left-b"></span> '+ backText + '</a>'+
				'</li>'+
				'<li class="subnav_item table-list-more">' +
					'<span class="colorbar"></span>'+
					'<a href="#" class="">'+nextText+' <span class="ion-arrow-right-b"></span></a>'+
				'</li></ul>').appendTo($tableList);
$tableList.find('.table-list-more').click(function(){
nextTableList($tableList);
});
$tableList.find('.table-list-back').click(function(){
prevTableList($tableList);
});
}
//Add first and last
$tableList.find('.column').first().addClass('first');
$tableList.find('.column').last().addClass('last');
//add position to everything
if($tableList.find('.current').length==0){
$tableList.find('.column').first().addClass('table-list-current-page');
} else {
var foundYou = false;
$tableList.find('.column').each(function(){
if($(this).find('.current').length>0){
$(this).addClass('table-list-current-page');
$(this).removeClass('table-list-right');
foundYou = true;
} else {
if(foundYou)
$(this).addClass('table-list-right');
else
$(this).addClass('table-list-left').removeClass('table-list-right');
}
});
}
if($tableList.find('.table-list-current-page').is('.last'))
$tableList.find('.table-list-more').fadeOut(0);
if($tableList.find('.table-list-current-page').is('.first'))
$tableList.find('.table-list-back').fadeOut(0);
}
function processTableList ($this) {
var $tempItems;
var $listItems = $this.find('li');
var $tableList = $this;
//reset parent element
if(!($tableList.is('.table-list'))){
$tableList = $tableList.closest('.table-list');
}
var tableListHeight = $tableList.height();
if(getTallestItemHeight($listItems) > tableListHeight)
{
$tableList.addClass('table-list-has-more');//This makes the table-list indent from the right making space for the more link box.;
//Remove items from the list until it fits the height defined in CSS
for (var i=0; i<$listItems.length; i++)
{
//Take the last element out and keep trying
if(getTallestItemHeight($listItems) > tableListHeight)
{
$tempItems = $this.find('li').last().detach().add($tempItems);
}
}
//Create another tablelist for the new items.
var $newTableList = $tableList.clone().find('.column').first();
$newTableList.find('ul').empty().append($tempItems);
$tableList.append($newTableList);
processTableList($newTableList); //recurse.
}
}
function checkTableLists(){
var check = true;
$('.table-list').each(function(){
if (getTallestItemHeight($(this).find('li')) > $(this).height())
check = false;
});
return check;
}
function resetTableLists(){
$('.table-list').each(function(){
$(this).removeClass('table-list-transitions-on table-list-has-more');
if($(this).find('.column').length > 0)
{
$(this).find('.column li').appendTo($(this).find('.column ul').first().empty()); //Put everything into one.
// clean up
$(this).find('.subnav_navigation, .column:not(":first-child")').remove();
$(this).find('.first, .last, .table-list-current-page, .table-list-right, .table-list-left').removeClass('first last table-list-current-page table-list-right table-list-left');
}
});
}
function getTallestItemHeight($elements){
var tallestHeight=0;
$elements.each(function(){
if($(this).height()>tallestHeight)
tallestHeight = $(this).height();
});
return tallestHeight;
}
function nextTableList($this){
$this.addClass('table-list-transitions-on');
$this.find('.table-list-back').fadeIn();
if(!($this.find('.table-list-current-page').is('.last')))
$this.find('.table-list-current-page').removeClass('table-list-current-page').addClass('table-list-left').next('.column').addClass('table-list-current-page').removeClass('table-list-right');
if($this.find('.table-list-current-page').is('.last'))
$this.find('.table-list-more').fadeOut();
return $this;
}
function prevTableList($this){
$this.addClass('table-list-transitions-on');
$this.find('.table-list-more').fadeIn();
if(!($this.find('.table-list-current-page').is('.first')))
$this.find('.table-list-current-page').removeClass('table-list-current-page').addClass('table-list-right').prev('.column').addClass('table-list-current-page').removeClass('table-list-left');
if($this.find('.table-list-current-page').is('.first'))
$this.find('.table-list-back').fadeOut();
return $this;
}<file_sep>/_build/_includes/blue-dot-left-callout.html
<div class="blue-dot-left">
<div class="row">
<div class="small-12 medium-6 columns">
		<div class="callout-image" style="background-image:url({{ page.path }}img/blue-dot-callout/callout-left-main.png)">
<div class="dot-callout hide-for-small show-for-large-up">
<div class="row">
<div class="medium-4 medium-offset-4 columns">
<img src="{{ page.path }}img/blue-dot-callout/blue-dot.png" />
</div>
<div class="medium-4 columns">
<div class="callout-text">
<img src="{{ page.path }}img/blue-dot-callout/bar-shape.png" />
Each cartridge lasts
<span>10,000 GALLONS</span>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="small-12 medium-6 columns">
<div class="colshell">
<h3>Extended life refills</h3>
<div class="callout-description">
Inclusum illis seductaque nabataeaque moderantum siccis fronde. Obliquis occiduo uno opifex. Pressa terrae calidis feras iussit. Pluvialibus tollere deus quod pugnabant mixta aurea diverso. Fontes corpora pronaque totidemque pontus sed.
</div>
<div class="see-all">
<a href="#">See all filter cartridges</a>
</div>
</div>
</div>
</div>
</div><file_sep>/README.md
# DIY Redesign
The compiled site can be found in the `_deploy` folder. The source files are located in the `_build` folder.
## Compiling the site
In terminal, `cd` to project root.
To build the HTML, compile the CSS, and start a local dev server, run `jekyll serve --watch`.
This will look for changes, rebuild and allow you to preview the site at `localhost:4000`.
## Requirements
- [Jekyll](http://jekyllrb.com/)
- [SASS](http://sass-lang.com/)
## CSS
- [Foundation](http://foundation.zurb.com/docs/)
## JavaScript
- [JQuery](http://jquery.com/)
- [Modernizr](http://modernizr.com/)
|
82d038b7a180ed50178177f90fb0f454324eaf12
|
[
"JavaScript",
"HTML",
"Markdown"
] | 3 |
JavaScript
|
zheindel/culligan-diy
|
14b2eb59bb886b72148ae5dea75c00ab0c3b8cc3
|
b23b4d26d704ed6ab26b1a9581c27473e4630cc0
|
refs/heads/master
|
<file_sep>package main
import (
goflag "flag"
"github.com/golang/glog"
"github.com/spf13/cobra"
"io"
"kope.io/klogs/pkg/client"
)
const DefaultServerUrl = "http://127.0.0.1:7777"
func NewRootCommand(out io.Writer) (*cobra.Command, error) {
factory := &client.DefaultFactory{
Server: DefaultServerUrl,
}
err := factory.LoadConfigurationFiles()
if err != nil {
return nil, err
}
cmd := &cobra.Command{
Use: "klogs",
Short: "klogs is kubernetes logs",
}
// Really just to force the import
glog.Flush()
cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
cmd.PersistentFlags().StringVar(&factory.Server, "server", factory.Server, "Server to query")
cmd.PersistentFlags().StringVar(&factory.Token, "token", factory.Token, "Token to use to authenticate to the server")
cmd.PersistentFlags().StringVarP(&factory.Username, "user", "u", factory.Username, "Username to use to authenticate to the server")
cmd.PersistentFlags().VarP(newPasswordValue(factory.Password, &factory.Password), "password", "p", "Password to use to authenticate to the server")
// create subcommands
cmd.AddCommand(NewCmdStreams(factory, out))
cmd.AddCommand(NewCmdSearch(factory, out))
return cmd, nil
}
<file_sep>package mesh
import (
"fmt"
"github.com/golang/glog"
"google.golang.org/grpc"
"kope.io/klogs/pkg/proto"
"net/url"
"sync"
"time"
)
type Member struct {
id string
hostInfo proto.HostInfo
mutex sync.Mutex
logsClient proto.LogServerClient
}
func (h *Member) run() {
for {
err := h.runOnce()
if err != nil {
glog.Warningf("error polling host %q: %v", h.id, err)
}
time.Sleep(5 * time.Second)
}
}
func (h *Member) update(request *proto.JoinMeshRequest) {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.hostInfo.Url != request.HostInfo.Url {
h.logsClient = nil
}
h.hostInfo = *request.HostInfo
}
func (h *Member) runOnce() error {
return nil
}
func (h *Member) LogsClient() (proto.LogServerClient, error) {
h.mutex.Lock()
defer h.mutex.Unlock()
client := h.logsClient
if client == nil {
u, err := url.Parse(h.hostInfo.Url)
if err != nil {
return nil, fmt.Errorf("invalid host url %q", h.hostInfo.Url)
}
var opts []grpc.DialOption
opts = append(opts, grpc.WithInsecure())
conn, err := grpc.Dial(u.Host, opts...)
if err != nil {
return nil, fmt.Errorf("failed to connect to mesh client: %v", err)
}
client = proto.NewLogServerClient(conn)
h.logsClient = client
}
return client, nil
}
func (m *Member) Id() string {
return m.id
}
<file_sep>package main
type passwordValue string
func newPasswordValue(val string, p *string) *passwordValue {
*p = val
return (*passwordValue)(p)
}
func (i *passwordValue) String() string {
if *i == "" {
return ""
} else {
return "***"
}
}
func (i *passwordValue) Set(s string) error {
*i = passwordValue(s)
return nil
}
func (i *passwordValue) Type() string {
return "password"
}
<file_sep>package loghub
import (
"github.com/golang/glog"
"kope.io/klogs/pkg/grpc"
"kope.io/klogs/pkg/mesh"
)
type Options struct {
LogGRPC grpc.GRPCOptions
MeshGRPC grpc.GRPCOptions
}
func (o *Options) SetDefaults() {
o.LogGRPC.Listen = "https://:7777"
o.MeshGRPC.Listen = "http://:7878"
}
func ListenAndServe(options *Options) error {
m, err := mesh.NewServer(&options.MeshGRPC)
if err != nil {
return err
}
logServer, err := newLogServer(&options.LogGRPC, m)
if err != nil {
return err
}
go func() {
if err := m.ListenAndServe(); err != nil {
// TODO: Futures?
glog.Fatalf("error starting mesh: %v", err)
}
}()
return logServer.ListenAndServe()
}
<file_sep>// Package klogs is the parent package for the klogs tool
package klogs // import "kope.io/klogs"
<file_sep>package main // import "kope.io/klogs/cmd/klog-hub"
import (
"crypto/tls"
"crypto/x509"
goflag "flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io/ioutil"
"kope.io/klogs/pkg/grpc"
"kope.io/klogs/pkg/loghub"
"os"
"strings"
)
var (
// value overwritten during build. This can be used to resolve issues.
version = "0.1"
gitRepo = "https://kope.io/klogs"
)
type Options struct {
Loghub loghub.Options
GrpcPublicTlsCert string
GrpcPublicTlsKey string
GrpcPublicAuthenticationMethod string
KubernetesAuthenticationUrl string
KubernetesAuthenticationCA string
}
func main() {
flags := pflag.NewFlagSet("", pflag.ExitOnError)
var options Options
options.Loghub.SetDefaults()
options.GrpcPublicAuthenticationMethod = "kubernetes"
options.KubernetesAuthenticationUrl = "https://kubernetes.default/api"
options.KubernetesAuthenticationCA = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
flags.StringVar(&options.Loghub.LogGRPC.Listen, "grpc-public-listen", options.Loghub.LogGRPC.Listen, "Address on which to listen for public request")
flags.StringVar(&options.Loghub.MeshGRPC.Listen, "grpc-mesh-listen", options.Loghub.MeshGRPC.Listen, "Address on which to listen for internal requests")
flags.StringVar(&options.GrpcPublicTlsCert, "grpc-public-tls-cert", options.GrpcPublicTlsCert, "Path to TLS certificate")
flags.StringVar(&options.GrpcPublicTlsKey, "grpc-public-tls-key", options.GrpcPublicTlsKey, "Path to TLS private key")
flags.StringVar(&options.GrpcPublicAuthenticationMethod, "grpc-public-authentication", options.GrpcPublicAuthenticationMethod, "Authentication method to use")
flags.StringVar(&options.KubernetesAuthenticationUrl, "kubernetes-auth-url", options.KubernetesAuthenticationUrl, "Kubernetes authentication URL to use")
// Trick to avoid 'logging before flag.Parse' warning
goflag.CommandLine.Parse([]string{})
goflag.Set("logtostderr", "true")
flags.AddGoFlagSet(goflag.CommandLine)
//clientConfig := kubectl_util.DefaultClientConfig(flags)
	args := os.Args[1:] // skip the program name; pflag's Parse expects only the arguments
	flagsPath := "/config/flags.yaml"
	_, err := os.Lstat(flagsPath)
	if err == nil {
		flagsFile, err := ioutil.ReadFile(flagsPath)
		if err != nil {
			glog.Fatalf("error reading %q: %v", flagsPath, err)
		}
		for _, line := range strings.Split(string(flagsFile), "\n") {
			line = strings.TrimSpace(line)
			if line == "" {
				continue // ignore blank lines in the flags file
			}
			args = append(args, line)
		}
	} else if !os.IsNotExist(err) {
		glog.Infof("Cannot read %q: %v", flagsPath, err)
	}
	flags.Parse(args)
glog.Infof("loghub - build: %v - %v", gitRepo, version)
err = Run(&options)
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error: %v", err)
os.Exit(1)
}
os.Exit(0)
}
func Run(options *Options) error {
var err error
if options.GrpcPublicTlsCert != "" {
options.Loghub.LogGRPC.TLSCert, err = ioutil.ReadFile(options.GrpcPublicTlsCert)
if err != nil {
return fmt.Errorf("error reading file %q: %v", options.GrpcPublicTlsCert, err)
}
}
if options.GrpcPublicTlsKey != "" {
options.Loghub.LogGRPC.TLSKey, err = ioutil.ReadFile(options.GrpcPublicTlsKey)
if err != nil {
return fmt.Errorf("error reading file %q: %v", options.GrpcPublicTlsKey, err)
}
}
if options.GrpcPublicAuthenticationMethod == "kubernetes" {
tlsConfig := &tls.Config{}
if options.KubernetesAuthenticationCA != "" {
rootCAs := x509.NewCertPool()
pemData, err := ioutil.ReadFile(options.KubernetesAuthenticationCA)
if err != nil {
return fmt.Errorf("error reading file %q: %v", options.KubernetesAuthenticationCA, err)
}
if !rootCAs.AppendCertsFromPEM(pemData) {
return fmt.Errorf("unable to parse ca certificate file %q: %v", options.KubernetesAuthenticationCA, err)
}
tlsConfig.RootCAs = rootCAs
}
options.Loghub.LogGRPC.Authorizer = grpc.NewKubernetesAuthorizer(options.KubernetesAuthenticationUrl, tlsConfig)
} else {
// options.LogGRPC.Authorizer = grpc.NewTokenAuthorizer([]string{grpcPublicToken})
return fmt.Errorf("unknown authentication method %q", options.GrpcPublicAuthenticationMethod)
}
err = loghub.ListenAndServe(&options.Loghub)
if err != nil {
return fmt.Errorf("error running server: %v", err)
}
return err
}
<file_sep>package logspoke
import (
"github.com/golang/glog"
"time"
)
type Scraper struct {
pods *PodsDirectory
containers *ContainersDirectory
}
func newScraper(options *Options, nodeState *NodeState) (*Scraper, error) {
scraper := &Scraper{}
pods, err := NewPodsDirectory(options.PodDir, nodeState)
if err != nil {
return nil, err
}
scraper.pods = pods
containers, err := NewContainerLogsDirectory(options.ContainerDir, nodeState)
if err != nil {
return nil, err
}
scraper.containers = containers
return scraper, nil
}
func (s *Scraper) Run() error {
for {
if err := s.pods.Scan(); err != nil {
glog.Warningf("error scanning pods directory: %v", err)
}
if err := s.containers.Scan(); err != nil {
glog.Warningf("error scanning containers directory: %v", err)
}
time.Sleep(time.Minute)
}
}
<file_sep>package logspoke
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"
"kope.io/klogs/pkg/proto"
"net/url"
"time"
)
type MeshMember struct {
server *url.URL
id string
listen *url.URL
}
func newMeshMember(options *Options) (*MeshMember, error) {
if options.JoinHub == "" {
return nil, fmt.Errorf("JoinHub not set")
}
if options.NodeName == "" {
return nil, fmt.Errorf("NodeName not set")
}
serverUrl, err := url.Parse(options.JoinHub)
if err != nil {
return nil, fmt.Errorf("Invalid JoinHub url %q", options.JoinHub)
}
listenUrl, err := url.Parse(options.Listen)
if err != nil {
return nil, fmt.Errorf("Invalid listen url %q", options.Listen)
}
m := &MeshMember{
id: options.NodeName,
server: serverUrl,
listen: listenUrl,
}
return m, nil
}
func (m *MeshMember) Run() error {
for {
err := func() error {
var opts []grpc.DialOption
//if *tls {
// var sn string
// if *serverHostOverride != "" {
// sn = *serverHostOverride
// }
// var creds credentials.TransportCredentials
// if *caFile != "" {
// var err error
// creds, err = credentials.NewClientTLSFromFile(*caFile, sn)
// if err != nil {
// grpclog.Fatalf("Failed to create TLS credentials %v", err)
// }
// } else {
// creds = credentials.NewClientTLSFromCert(nil, sn)
// }
// opts = append(opts, grpc.WithTransportCredentials(creds))
//} else {
opts = append(opts, grpc.WithInsecure())
//}
conn, err := grpc.Dial(m.server.Host, opts...)
if err != nil {
return fmt.Errorf("failed to connect to mesh server: %v", err)
}
defer conn.Close()
client := proto.NewMeshServiceClient(conn)
for {
ctx := context.Background()
request := &proto.JoinMeshRequest{
HostInfo: &proto.HostInfo{
Id: m.id,
Url: m.listen.Scheme + "://" + m.listen.Host,
},
}
response, err := client.JoinMesh(ctx, request)
if err != nil {
return fmt.Errorf("unexpected response to mesh join: %v", err)
}
glog.V(2).Infof("mesh join response %s", response)
time.Sleep(10 * time.Second)
}
return nil
}()
if err != nil {
glog.Warningf("error joining mesh: %v", err)
}
time.Sleep(10 * time.Second)
}
}
<file_sep>package s3archive
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/golang/glog"
"kope.io/klogs/pkg/archive"
"kope.io/klogs/pkg/proto"
"net/url"
"os"
"path"
"strings"
)
type Sink struct {
bucket string
basekey string
s3Client s3iface.S3API
}
var _ archive.Sink = &Sink{}
func NewSink(u *url.URL) (*Sink, error) {
bucket := strings.TrimSuffix(u.Host, "/")
s := &Sink{
bucket: bucket,
basekey: u.Path,
}
var region string
{
config := aws.NewConfig().WithRegion("us-east-1")
session := session.New()
s3Client := s3.New(session, config)
request := &s3.GetBucketLocationInput{}
request.Bucket = aws.String(bucket)
glog.V(2).Infof("Querying S3 for bucket location for %q", bucket)
response, err := s3Client.GetBucketLocation(request)
if err != nil {
return nil, fmt.Errorf("error getting location for S3 bucket %q: %v", bucket, err)
}
if response.LocationConstraint == nil {
// US Classic does not return a region
region = "us-east-1"
} else {
region = *response.LocationConstraint
// Another special case: "EU" can mean eu-west-1
if region == "EU" {
region = "eu-west-1"
}
}
glog.V(2).Infof("Found bucket %q in region %q", bucket, region)
}
config := aws.NewConfig().WithRegion(region)
session := session.New()
s.s3Client = s3.New(session, config)
return s, nil
}
func (s *Sink) AddToArchive(sourcePath string, podUID string, fileInfo *proto.LogFile) error {
glog.V(2).Infof("found file to archive: %q %q", sourcePath, fileInfo)
s3Key := path.Join(s.basekey, "pods", podUID, "logs", fileInfo.Path)
f, err := os.OpenFile(sourcePath, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("unable to open file %q: %v", sourcePath, err)
}
defer f.Close()
request := &s3.PutObjectInput{}
request.Body = f
request.Bucket = aws.String(s.bucket)
request.Key = aws.String(s3Key)
// We don't need Content-MD5: https://github.com/aws/aws-sdk-go/issues/208
// TODO: Only if changed?
_, err = s.s3Client.PutObject(request)
if err != nil {
return fmt.Errorf("error writing s3://%s/%s: %v", s.bucket, s3Key, err)
}
glog.V(2).Infof("Uploaded file to s3://%s/%s", s.bucket, s3Key)
return nil
}
<file_sep>package grpc
import (
"crypto/tls"
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"net"
"net/url"
)
type GRPCOptions struct {
Listen string
TLSCert []byte
TLSKey []byte
Authorizer Authorizer
}
type GRPCServer struct {
listen url.URL
Server *grpc.Server
}
func NewGrpcServer(options *GRPCOptions) (*GRPCServer, error) {
u, err := url.Parse(options.Listen)
if err != nil {
return nil, fmt.Errorf("Invalid listen address %q", options.Listen)
}
g := &GRPCServer{
listen: *u,
}
var opts []grpc.ServerOption
if u.Scheme == "http" {
// No options needed
} else if u.Scheme == "https" {
if options.TLSCert == nil {
return nil, fmt.Errorf("https selected, but tls-cert not provided")
}
if options.TLSKey == nil {
return nil, fmt.Errorf("https selected, but tls-key not provided")
}
cert, err := tls.X509KeyPair(options.TLSCert, options.TLSKey)
if err != nil {
return nil, err
}
credentials := credentials.NewServerTLSFromCert(&cert)
opts = append(opts, grpc.Creds(credentials))
} else {
return nil, fmt.Errorf("scheme not recognized: %q", u.Scheme)
}
if options.Authorizer != nil {
opts = append(opts, grpc.StreamInterceptor(func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
glog.Infof("Authorizing request %v", info.FullMethod)
if err := options.Authorizer.Authorize(stream.Context()); err != nil {
return err
}
return handler(srv, stream)
}))
opts = append(opts, grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
glog.Infof("Authorizing request %v", info.FullMethod)
if err := options.Authorizer.Authorize(ctx); err != nil {
return nil, err
}
return handler(ctx, req)
}))
}
g.Server = grpc.NewServer(opts...)
return g, nil
}
func (g *GRPCServer) ListenAndServe() error {
glog.Infof("Listening on %s", g.listen.String())
lis, err := net.Listen("tcp", g.listen.Host)
if err != nil {
return fmt.Errorf("Failed to listen on %q: %v", g.listen, err)
}
defer lis.Close()
return g.Server.Serve(lis)
}
<file_sep># klogs
Log aggregation without aggregation
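
The hub also exposes a small Go client package (`pkg/client`), which is what the `klogs` CLI uses. A minimal sketch of programmatic use (the hub address below is a placeholder; any server/username/password/token settings in `~/.klogs/config.yaml` are merged in):

```go
package main

import (
	"fmt"
	"os"

	"kope.io/klogs/pkg/client"
)

func main() {
	// Point the factory at a klog-hub instance (placeholder address).
	f := &client.DefaultFactory{Server: "https://loghub.example.com:7777"}

	// Merge any settings from ~/.klogs/config.yaml, if present.
	if err := f.LoadConfigurationFiles(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// List the log streams known to the hub (same code path as `klogs streams`).
	if err := client.RunListStreams(f, os.Stdout, &client.ListStreamsOptions{}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```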
<file_sep>package main
import (
"github.com/spf13/cobra"
"io"
"kope.io/klogs/pkg/client"
)
func NewCmdStreams(factory client.Factory, out io.Writer) *cobra.Command {
options := &client.ListStreamsOptions{}
cmd := &cobra.Command{
Use: "streams",
Short: "Streams",
Run: func(cmd *cobra.Command, args []string) {
err := client.RunListStreams(factory, out, options)
if err != nil {
exitWithError(err)
}
},
}
return cmd
}
<file_sep>package logspoke
import (
"fmt"
"github.com/golang/glog"
"google.golang.org/grpc"
"kope.io/klogs/pkg/proto"
"net"
"net/url"
)
type LogServer struct {
listen string
logServer proto.LogServerServer
}
func newLogServer(options *Options, logServer proto.LogServerServer) (*LogServer, error) {
if options.Listen == "" {
return nil, fmt.Errorf("MeshListen not set")
}
m := &LogServer{
listen: options.Listen,
logServer: logServer,
}
return m, nil
}
func (m *LogServer) Run() error {
u, err := url.Parse(m.listen)
if err != nil {
return fmt.Errorf("Invalid listen url %q", m.listen)
}
glog.Infof("Serving GRPC on %s", m.listen)
lis, err := net.Listen("tcp", u.Host)
if err != nil {
return fmt.Errorf("Failed to listen on %q: %v", m.listen, err)
}
defer lis.Close()
var opts []grpc.ServerOption
//if *tls {
// creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
// if err != nil {
// grpclog.Fatalf("Failed to generate credentials %v", err)
// }
// opts = []grpc.ServerOption{grpc.Creds(creds)}
//}
grpcServer := grpc.NewServer(opts...)
proto.RegisterLogServerServer(grpcServer, m.logServer)
err = grpcServer.Serve(lis)
if err != nil {
return fmt.Errorf("error running grpc server: %v", err)
}
return nil
}
<file_sep>package client
import (
"bytes"
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"io"
"kope.io/klogs/pkg/proto"
"strconv"
"strings"
"time"
)
const (
OutputFormatRaw = "raw"
OutputFormatDescribe = "describe"
)
type SearchOptions struct {
Output string
}
func RunSearch(f Factory, out io.Writer, args []string, o *SearchOptions) error {
request := &proto.SearchRequest{}
var formatter func(commonFields *proto.Fields, items []*proto.SearchResult, out io.Writer) error
switch o.Output {
case OutputFormatRaw:
formatter = formatRaw
case OutputFormatDescribe:
formatter = formatDescribe
default:
return fmt.Errorf("unknown output format %q", o.Output)
}
if len(args) > 0 {
for _, arg := range args {
// TODO: build a parser properly!
if strings.Contains(arg, "!=") {
i := strings.Index(arg, "!=")
request.FieldFilters = append(request.FieldFilters, &proto.FieldFilter{
Key: arg[0:i],
Value: arg[i+2:],
Op: proto.FieldFilterOperator_NOT_EQ,
})
} else if strings.Contains(arg, "=") {
tokens := strings.SplitN(arg, "=", 2)
key := tokens[0]
value := tokens[1]
if key == "age" {
d, err := parseDurationExpression(value)
if err != nil {
return err
}
// TODO: Need to sync times somehow
ts := time.Now().Add(-d)
request.FieldFilters = append(request.FieldFilters, &proto.FieldFilter{
Key: "@timestamp",
Value: ts.Format(time.RFC3339Nano),
Op: proto.FieldFilterOperator_GTE,
})
} else {
request.FieldFilters = append(request.FieldFilters, &proto.FieldFilter{
Key: key,
Value: value,
Op: proto.FieldFilterOperator_EQ,
})
}
} else {
// substring match
if request.Contains != "" {
return fmt.Errorf("multiple search not yet implemented")
}
request.Contains = arg
}
}
}
glog.V(2).Infof("query: %v", request)
client, err := f.LogServerClient()
if err != nil {
return err
}
// TODO: What is the right context?
ctx := context.Background()
stream, err := client.Search(ctx, request)
if err != nil {
return fmt.Errorf("error making request: %v", err)
}
for {
in, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("error reading from server: %v", err)
}
if err := formatter(in.CommonFields, in.Items, out); err != nil {
return fmt.Errorf("error writing results: %v", err)
}
}
return nil
}
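// parseDurationExpression parses simple duration expressions such as "30s", "5m", "2h", "3d" or "1w"; a bare number is treated as minutes.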
func parseDurationExpression(s string) (time.Duration, error) {
s = strings.TrimSpace(s)
units := time.Minute
if strings.HasSuffix(s, "s") {
units = time.Second
s = s[0 : len(s)-1]
} else if strings.HasSuffix(s, "m") {
units = time.Minute
s = s[0 : len(s)-1]
} else if strings.HasSuffix(s, "h") {
units = time.Hour
s = s[0 : len(s)-1]
} else if strings.HasSuffix(s, "d") {
units = time.Hour * 24
s = s[0 : len(s)-1]
} else if strings.HasSuffix(s, "w") {
units = time.Hour * 24 * 7
s = s[0 : len(s)-1]
}
number, err := strconv.Atoi(s)
if err != nil {
return 0, fmt.Errorf("cannot parse %q as duration", s)
}
d := time.Duration(number) * units
return d, nil
}
func formatRaw(commonFields *proto.Fields, items []*proto.SearchResult, out io.Writer) error {
var b bytes.Buffer
for _, item := range items {
if item.Fields != nil {
for _, f := range item.Fields.Fields {
if f.Key != "log" {
continue
}
b.WriteString(strings.TrimSuffix(f.Value, "\n"))
b.WriteString("\n")
}
}
}
_, err := b.WriteTo(out)
return err
}
func formatDescribe(commonFields *proto.Fields, items []*proto.SearchResult, out io.Writer) error {
var b bytes.Buffer
for _, item := range items {
b.WriteString("\n-----------\n")
t := ""
if item.Timestamp != 0 {
seconds := int64(item.Timestamp / 1E9)
nanos := int64(item.Timestamp % 1E9)
ts := time.Unix(seconds, nanos)
t = ts.Format(time.RFC3339Nano)
b.WriteString("time\t")
b.WriteString(t)
b.WriteString("\n")
}
if item.Fields != nil {
for _, f := range item.Fields.Fields {
if f.Key != "log" {
continue
}
b.WriteString(f.Key)
b.WriteString("\t")
b.WriteString(strings.TrimSuffix(f.Value, "\n"))
b.WriteString("\n")
}
for _, f := range item.Fields.Fields {
if f.Key == "log" {
continue
}
b.WriteString(f.Key)
b.WriteString("\t")
b.WriteString(f.Value)
b.WriteString("\n")
}
}
if commonFields != nil {
for _, f := range commonFields.Fields {
b.WriteString(f.Key)
b.WriteString("\t")
b.WriteString(f.Value)
b.WriteString("\n")
}
}
}
_, err := b.WriteTo(out)
return err
}
<file_sep>package loghub
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"io"
"kope.io/klogs/pkg/grpc"
"kope.io/klogs/pkg/mesh"
"kope.io/klogs/pkg/proto"
"sync"
)
type LogServer struct {
grpcServer *grpc.GRPCServer
mesh *mesh.Server
}
var _ proto.LogServerServer = &LogServer{}
func newLogServer(options *grpc.GRPCOptions, mesh *mesh.Server) (*LogServer, error) {
grpcServer, err := grpc.NewGrpcServer(options)
if err != nil {
return nil, err
}
s := &LogServer{
grpcServer: grpcServer,
mesh: mesh,
}
proto.RegisterLogServerServer(grpcServer.Server, s)
return s, nil
}
func (s *LogServer) ListenAndServe() error {
return s.grpcServer.ListenAndServe()
}
func (s *LogServer) GetStreams(request *proto.GetStreamsRequest, out proto.LogServer_GetStreamsServer) error {
ctx := out.Context()
members := s.mesh.Members()
for _, member := range members {
// TODO: Run in parallel?
client, err := member.LogsClient()
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error fetching client for %q: %v", member.Id(), err)
}
stream, err := client.GetStreams(ctx, request)
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error querying member %q: %v", member.Id(), err)
}
for {
in, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error reading from member %q: %v", member.Id(), err)
}
err = out.Send(in)
if err != nil {
return fmt.Errorf("error sending results: %v", err)
}
}
}
return nil
}
func (s *LogServer) Search(request *proto.SearchRequest, out proto.LogServer_SearchServer) error {
// TODO: Copy-pasted
ctx := out.Context()
members := s.mesh.Members()
glog.Warningf("member filtering not implemented")
var sendMutex sync.Mutex
var wg sync.WaitGroup
ops := make([]*DistributedOp, len(members))
wg.Add(len(members))
for i, member := range members {
search := &DistributedOp{
ctx: ctx,
member: member,
}
ops[i] = search
go func(search *DistributedOp) {
search.err = search.Search(&sendMutex, request, out)
wg.Done()
}(search)
}
wg.Wait()
for _, op := range ops {
if op.err != nil {
return fmt.Errorf("error from member %q: %v", op.member.Id(), op.err)
}
}
return nil
}
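// DistributedOp tracks a search fanned out to a single mesh member, recording any error so the caller can report it after all members have finished.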
type DistributedOp struct {
ctx context.Context
member *mesh.Member
err error
}
func (s *DistributedOp) Search(sendMutex *sync.Mutex, request *proto.SearchRequest, out proto.LogServer_SearchServer) error {
client, err := s.member.LogsClient()
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error fetching client: %v", err)
}
stream, err := client.Search(s.ctx, request)
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error querying member: %v", err)
}
for {
in, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: retries / toleration
return fmt.Errorf("error reading result: %v", err)
}
sendMutex.Lock()
err = out.Send(in)
sendMutex.Unlock()
if err != nil {
return fmt.Errorf("error sending results: %v", err)
}
}
return nil
}
<file_sep>package main
import (
"github.com/spf13/cobra"
"io"
"kope.io/klogs/pkg/client"
)
func NewCmdSearch(factory client.Factory, out io.Writer) *cobra.Command {
options := &client.SearchOptions{}
options.Output = client.OutputFormatDescribe
cmd := &cobra.Command{
Use: "search",
Aliases: []string{"s"},
Short: "search",
Run: func(cmd *cobra.Command, args []string) {
err := client.RunSearch(factory, out, args, options)
if err != nil {
exitWithError(err)
}
},
}
cmd.PersistentFlags().StringVarP(&options.Output, "output", "o", options.Output, "Output format: raw, describe")
return cmd
}
<file_sep>package client
import (
"fmt"
"golang.org/x/net/context"
"io"
"kope.io/klogs/pkg/proto"
)
type ListStreamsOptions struct {
}
func RunListStreams(f Factory, out io.Writer, o *ListStreamsOptions) error {
request := &proto.GetStreamsRequest{}
client, err := f.LogServerClient()
if err != nil {
return err
}
// TODO: What is the right context?
ctx := context.Background()
stream, err := client.GetStreams(ctx, request)
if err != nil {
return fmt.Errorf("error making request: %v", err)
}
for {
in, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("error reading from server: %v", err)
}
_, err = fmt.Fprintf(out, "%v\n", in)
if err != nil {
return fmt.Errorf("error writing results: %v", err)
}
}
return nil
}
<file_sep>package logspoke
import (
"fmt"
"github.com/golang/glog"
"io"
"kope.io/klogs/pkg/proto"
"os"
"path"
"time"
)
type PodsDirectory struct {
basedir string
idlePeriod time.Duration
state *NodeState
}
func NewPodsDirectory(basedir string, state *NodeState) (*PodsDirectory, error) {
d := &PodsDirectory{
basedir: basedir,
idlePeriod: idlePeriod,
state: state,
}
return d, nil
}
func (d *PodsDirectory) Scan() error {
return d.scanPodsDir(d.basedir)
}
func (d *PodsDirectory) scanPodsDir(basepath string) error {
f, err := os.OpenFile(basepath, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("error opening %q: %v", basepath, err)
}
defer f.Close()
names, err := f.Readdirnames(-1)
if err != nil {
return fmt.Errorf("error reading directory %q: %v", basepath, err)
}
for _, name := range names {
p := path.Join(basepath, name)
glog.V(4).Infof("Found pod: %q", p)
err = d.scanPodDirectory(p, name)
if err != nil {
return err
}
}
d.state.CleanupPodLogs(names)
return nil
}
func (d *PodsDirectory) scanPodDirectory(basepath string, podID string) error {
p := path.Join(basepath, "volumes/kubernetes.io~empty-dir/logs")
stat, err := os.Lstat(p)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("error doing stat on %q: %v", p, err)
}
if !stat.IsDir() {
return nil
}
podState := d.state.GetPodState(podID)
glog.V(4).Infof("Found pod logs mount: %q", p)
fileMap := make(map[string]struct{})
err = d.scanLogsTree(p, podState, "", fileMap)
if err != nil {
return err
}
glog.Warningf("TODO: Remove files not in fileMap")
return nil
}
func (d *PodsDirectory) scanLogsTree(basepath string, podState *PodState, relativePath string, fileMap map[string]struct{}) error {
f, err := os.OpenFile(basepath, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("error opening %q: %v", basepath, err)
}
defer f.Close()
for {
names, err := f.Readdirnames(512)
if err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("error reading directory %q: %v", basepath, err)
}
for _, name := range names {
p := path.Join(basepath, name)
// TOOD: Use dirent to find out if dirs or not?
stat, err := os.Lstat(p)
if err != nil {
return fmt.Errorf("error doing lstat on %q: %v", p, err)
}
if stat.IsDir() {
err = d.scanLogsTree(p, podState, path.Join(relativePath, name), fileMap)
if err != nil {
return err
}
} else {
f := path.Join(relativePath, name)
fileMap[f] = struct{}{}
fields := &proto.Fields{}
podState.foundFile(p, path.Join(relativePath, name), stat, fields)
}
}
}
return nil
}
<file_sep>package mesh
import (
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"kope.io/klogs/pkg/grpc"
"kope.io/klogs/pkg/proto"
"sync"
)
type Server struct {
grpc *grpc.GRPCServer
mutex sync.Mutex
members map[string]*Member
}
var _ proto.MeshServiceServer = &Server{}
func NewServer(options *grpc.GRPCOptions) (*Server, error) {
grpcServer, err := grpc.NewGrpcServer(options)
if err != nil {
return nil, err
}
s := &Server{
grpc: grpcServer,
members: make(map[string]*Member),
}
proto.RegisterMeshServiceServer(grpcServer.Server, s)
return s, nil
}
func (s *Server) ListenAndServe() error {
return s.grpc.ListenAndServe()
}
func (s *Server) JoinMesh(context context.Context, request *proto.JoinMeshRequest) (*proto.JoinMeshResponse, error) {
glog.Infof("JoinMesh %s", request)
if request.HostInfo == nil {
return nil, fmt.Errorf("HostInfo not set")
}
id := request.HostInfo.Id
if id == "" {
return nil, fmt.Errorf("HostInfo.Id not set")
}
s.mutex.Lock()
defer s.mutex.Unlock()
h := s.members[id]
isNew := false
if h == nil {
h = &Member{id: id}
s.members[id] = h
isNew = true
}
h.update(request)
if isNew {
go h.run()
}
response := &proto.JoinMeshResponse{}
return response, nil
}
// Hosts returns a snapshot of the hosts
func (s *Server) Members() []*Member {
s.mutex.Lock()
defer s.mutex.Unlock()
members := make([]*Member, 0, len(s.members))
for _, m := range s.members {
members = append(members, m)
}
return members
}
<file_sep>package client
import (
"fmt"
"github.com/golang/glog"
"gopkg.in/yaml.v2"
"io/ioutil"
"kope.io/klogs/pkg/grpc"
"kope.io/klogs/pkg/proto"
"os"
"path/filepath"
"strings"
)
type Factory interface {
LogServerClient() (proto.LogServerClient, error)
}
type DefaultFactory struct {
Server string `json:"server,omitempty"`
Token string `json:"token,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}
var _ Factory = &DefaultFactory{}
func (f *DefaultFactory) LogServerClient() (proto.LogServerClient, error) {
options := &grpc.GRPCClientOptions{
Server: f.Server,
Token: f.Token,
Username: f.Username,
Password: f.Password,
}
conn, err := grpc.NewGRPCClient(options)
if err != nil {
return nil, err
}
client := proto.NewLogServerClient(conn)
return client, nil
}
func (f *DefaultFactory) LoadConfigurationFiles() error {
var paths []string
home := os.Getenv("HOME")
if home != "" {
paths = append(paths, filepath.Join(home, ".klogs", "config.yaml"))
}
for _, p := range paths {
data, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
continue
}
return fmt.Errorf("error reading config file %q: %v", p, err)
}
s := strings.TrimSpace(string(data))
if s == "" {
continue
}
glog.V(2).Infof("Parsing config file %q", p)
err = yaml.Unmarshal([]byte(s), f)
if err != nil {
return fmt.Errorf("error parsing config file %q: %v", p, err)
}
return nil
}
return nil
}
<file_sep>package logspoke
import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"io"
"io/ioutil"
"kope.io/klogs/pkg/proto"
"os"
"path"
"strings"
)
type ContainersDirectory struct {
containersDir string
state *NodeState
}
func NewContainerLogsDirectory(containersDir string, state *NodeState) (*ContainersDirectory, error) {
d := &ContainersDirectory{
containersDir: containersDir,
state: state,
}
return d, nil
}
func (d *ContainersDirectory) Scan() error {
return d.scanContainersDir(d.containersDir)
}
func (d *ContainersDirectory) scanContainersDir(basepath string) error {
f, err := os.OpenFile(basepath, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("error opening %q: %v", basepath, err)
}
defer f.Close()
names, err := f.Readdirnames(-1)
if err != nil {
return fmt.Errorf("error reading directory %q: %v", basepath, err)
}
for _, name := range names {
p := path.Join(basepath, name)
glog.V(4).Infof("Found container directory: %q", p)
err = d.scanContainerDirectory(p, name)
if err != nil {
return err
}
}
d.state.CleanupContainerLogs(names)
return nil
}
type DockerConfigV2 struct {
Config DockerConfigV2_Config
}
type DockerConfigV2_Config struct {
Image string
Labels map[string]string
}
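// tryReadConfig reads docker's config.v2.json for a container and maps its Kubernetes labels to fields (container.name, pod.name, pod.namespace, pod.uid); it returns nil if the file is missing or cannot be parsed.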
func tryReadConfig(containerID string, containerDir string) *proto.Fields {
configPath := path.Join(containerDir, "config.v2.json")
data, err := ioutil.ReadFile(configPath)
if err != nil {
if os.IsNotExist(err) {
// Ignore
glog.V(4).Infof("No config.v2.json file in %q", containerDir)
} else {
glog.Warningf("error reading file %q: %v", configPath, err)
}
return nil
}
config := &DockerConfigV2{}
err = json.Unmarshal(data, config)
if err != nil {
glog.Warningf("error parsing file %q: %v", configPath, err)
return nil
}
fields := &proto.Fields{}
fields.Fields = append(fields.Fields, &proto.Field{
Key: "container.id",
Value: containerID,
})
for k, v := range config.Config.Labels {
switch k {
case "io.kubernetes.container.name":
fields.Fields = append(fields.Fields, &proto.Field{
Key: "container.name",
Value: v,
})
case "io.kubernetes.pod.name":
fields.Fields = append(fields.Fields, &proto.Field{
Key: "pod.name",
Value: v,
})
case "io.kubernetes.pod.namespace":
fields.Fields = append(fields.Fields, &proto.Field{
Key: "pod.namespace",
Value: v,
})
case "io.kubernetes.pod.uid":
fields.Fields = append(fields.Fields, &proto.Field{
Key: "pod.uid",
Value: v,
})
}
}
//"io.kubernetes.container.hash": "8053578f",
// "io.kubernetes.container.name": "kubedns",
// "io.kubernetes.container.ports": "[{\"name\":\"dns-local\",\"containerPort\":10053,\"protocol\":\"UDP\"},{\"name\":\"dns-tcp-local\",\"containerPort\":10053,\"protocol\":\"TCP\"}]",
// "io.kubernetes.container.restartCount": "0",
// "io.kubernetes.container.terminationMessagePath": "/dev/termination-log",
// "io.kubernetes.pod.name": "kube-dns-v20-90109312-80wgs",
// "io.kubernetes.pod.namespace": "kube-system",
// "io.kubernetes.pod.terminationGracePeriod": "30",
// "io.kubernetes.pod.uid": "854530ad-975a-11e6-b8af-06e5bea45582"
//"io.kubernetes.container.hash": "d8dbe16c",
// "io.kubernetes.container.name": "POD",
// "io.kubernetes.container.restartCount": "0",
// "io.kubernetes.container.terminationMessagePath": "",
// "io.kubernetes.pod.name": "kube-dns-v20-90109312-xojr5",
// "io.kubernetes.pod.namespace": "kube-system",
// "io.kubernetes.pod.terminationGracePeriod": "30",
// "io.kubernetes.pod.uid": "86d89496-975a-11e6-b8af-06e5bea45582"
return fields
}
func (d *ContainersDirectory) scanContainerDirectory(containerDir string, containerID string) error {
containerState := d.state.GetContainerState(containerID)
fields := tryReadConfig(containerID, containerDir)
glog.V(4).Infof("Found container: %q", containerID)
fileMap := make(map[string]struct{})
f, err := os.OpenFile(containerDir, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("error opening %q: %v", containerDir, err)
}
defer f.Close()
for {
names, err := f.Readdirnames(512)
if err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("error reading directory %q: %v", containerDir, err)
}
for _, name := range names {
if !strings.HasPrefix(name, containerID+"-json.log") {
switch name {
case "hostconfig.json", "config.v2.json", "resolv.conf", "resolv.conf.hash", "hosts", "shm", "hostname":
// Ignore
default:
glog.Infof("Ignoring unknown file %q", name)
}
continue
}
p := path.Join(containerDir, name)
glog.Infof("Found container log file %q", p)
fileMap[name] = struct{}{}
stat, err := os.Lstat(p)
if err != nil {
if !os.IsNotExist(err) {
glog.Warningf("error doing lstat on file %q: %v", p, err)
}
continue
}
containerState.foundFile(p, name, stat, fields)
}
glog.Warningf("TODO: Remove files not in fileMap")
}
return nil
}
<file_sep>package grpc
import (
"errors"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
var AuthorizationRequired = errors.New("authorization is required")
var InvalidAuthorization = errors.New("unauthorized")
type Authorizer interface {
Authorize(ctx context.Context) error
}
type TokenAuthorizer struct {
valid map[string]struct{}
}
var _ Authorizer = &TokenAuthorizer{}
func NewTokenAuthorizer(validTokens []string) *TokenAuthorizer {
m := make(map[string]struct{})
for _, t := range validTokens {
m[t] = struct{}{}
}
return &TokenAuthorizer{valid: m}
}
func (t *TokenAuthorizer) Authorize(ctx context.Context) error {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return AuthorizationRequired
}
tokens := md[MetadataKeyToken]
if len(tokens) != 1 || tokens[0] == "" {
return AuthorizationRequired
}
_, found := t.valid[tokens[0]]
if !found {
return InvalidAuthorization
}
return nil
}
<file_sep>package grpc
import (
"crypto/tls"
"fmt"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
"net/http"
)
type KubernetesAuthorizer struct {
AuthURL string
TLSConfig *tls.Config
}
func NewKubernetesAuthorizer(authURL string, tlsConfig *tls.Config) *KubernetesAuthorizer {
k := &KubernetesAuthorizer{
AuthURL: authURL,
TLSConfig: tlsConfig,
}
return k
}
func (k *KubernetesAuthorizer) checkUsernamePassword(username, password string) error {
tr := &http.Transport{
TLSClientConfig: k.TLSConfig,
}
client := &http.Client{
Transport: tr,
}
req, err := http.NewRequest("GET", k.AuthURL, nil)
if err != nil {
return fmt.Errorf("error building request: %v", err)
}
req.SetBasicAuth(username, password)
response, err := client.Do(req)
if err != nil {
return fmt.Errorf("error making request: %v", err)
}
if response.StatusCode == 401 {
return InvalidAuthorization
}
if response.StatusCode == 200 {
return nil
}
return fmt.Errorf("unexpected status code: %v", response.Status)
}
func (k *KubernetesAuthorizer) Authorize(ctx context.Context) error {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return AuthorizationRequired
}
usernames := md[MetadataKeyUsername]
if len(usernames) != 1 || usernames[0] == "" {
return AuthorizationRequired
}
username := usernames[0]
passwords := md[MetadataKeyPassword]
if len(passwords) != 1 || passwords[0] == "" {
return AuthorizationRequired
}
password := passwords[0]
err := k.checkUsernamePassword(username, password)
if err == nil {
return nil
}
if err == InvalidAuthorization {
return err
}
glog.Warningf("Unexpected response from kubernetes authorization: %v", err)
return InvalidAuthorization
}
<file_sep>package archive
import (
"kope.io/klogs/pkg/proto"
)
type Sink interface {
AddToArchive(sourcePath string, podUID string, fileInfo *proto.LogFile) error
}
<file_sep>package main // import "kope.io/klogs/cmd/klogs"
import (
goflag "flag"
"fmt"
"os"
)
var (
// value overwritten during build. This can be used to resolve issues.
version = "0.1"
gitRepo = "https://kope.io/klog"
)
func main() {
Execute()
}
// exitWithError will terminate execution with an error result
// It prints the error to stderr and exits with a non-zero exit code
func exitWithError(err error) {
fmt.Fprintf(os.Stderr, "\n%v\n", err)
os.Exit(1)
}
func Execute() {
goflag.Set("logtostderr", "true")
goflag.CommandLine.Parse([]string{})
rootCommand, err := NewRootCommand(os.Stdout)
if err != nil {
exitWithError(err)
}
if err := rootCommand.Execute(); err != nil {
exitWithError(err)
}
}
<file_sep># TODO: Move entirely to bazel?
.PHONY: images
protobuf:
protoc -I ./pkg/proto ./pkg/proto/log.proto --go_out=plugins=grpc:pkg/proto
gofmt:
gofmt -w -s cmd/
gofmt -w -s pkg/
push: images
docker push kopeio/logging-spoke:latest
docker push kopeio/logging-hub:latest
images:
bazel run //images:klog-spoke
docker tag bazel/images:klog-spoke kopeio/logging-spoke:latest
bazel run //images:klog-hub
docker tag bazel/images:klog-hub kopeio/logging-hub:latest
<file_sep>package main // import "kope.io/klogs/cmd/klog-spoke"
import (
goflag "flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io/ioutil"
"kope.io/klogs/pkg/logspoke"
"net"
"os"
"strings"
)
var (
// value overwritten during build. This can be used to resolve issues.
version = "0.1"
gitRepo = "https://kope.io/klog"
)
func main() {
flags := pflag.NewFlagSet("", pflag.ExitOnError)
options := logspoke.Options{}
options.SetDefaults()
podIP, err := findPodIP()
if err != nil {
fmt.Fprintf(os.Stderr, "r: %v\n", err)
os.Exit(1)
}
options.Listen = "http://" + podIP.String() + ":7777"
flags.StringVar(&options.PodDir, "pod-dir", options.PodDir, "Directory where pods files are stored")
flags.StringVar(&options.ContainerDir, "container-dir", options.ContainerDir, "Directory where container files are stored")
flags.StringVar(&options.ArchiveSink, "archive", options.ArchiveSink, "Destination to upload archived files")
flags.StringVar(&options.JoinHub, "hub", options.JoinHub, "Hub server to register with")
flags.StringVar(&options.Listen, "listen", options.Listen, "Address on which to listen")
flags.StringVar(&options.NodeName, "nodename", options.NodeName, "Node name, or @path to load from path")
// Trick to avoid 'logging before flag.Parse' warning
goflag.CommandLine.Parse([]string{})
goflag.Set("logtostderr", "true")
flags.AddGoFlagSet(goflag.CommandLine)
//clientConfig := kubectl_util.DefaultClientConfig(flags)
args := os.Args
flagsPath := "/config/flags.yaml"
_, err = os.Lstat(flagsPath)
if err == nil {
flagsFile, err := ioutil.ReadFile(flagsPath)
if err != nil {
glog.Fatalf("error reading %q: %v", flagsPath, err)
}
for _, line := range strings.Split(string(flagsFile), "\n") {
line = strings.TrimSpace(line)
args = append(args, line)
}
} else if !os.IsNotExist(err) {
glog.Infof("Cannot read %q: %v", flagsPath, err)
}
flags.Parse(args)
glog.Infof("klog-spoke - build: %v - %v", gitRepo, version)
s, err := logspoke.NewLogShipper(&options)
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error: %v\n", err)
os.Exit(1)
}
err = s.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error: %v\n", err)
os.Exit(1)
}
}
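// findPodIP guesses the pod IP by scanning the "eth*" network interfaces and returning the first address that is not loopback or link-local.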
func findPodIP() (net.IP, error) {
var ips []net.IP
networkInterfaces, err := net.Interfaces()
if err != nil {
return nil, fmt.Errorf("error querying interfaces to determine pod ip: %v", err)
}
for i := range networkInterfaces {
networkInterface := &networkInterfaces[i]
flags := networkInterface.Flags
name := networkInterface.Name
if (flags & net.FlagLoopback) != 0 {
glog.V(2).Infof("Ignoring interface %s - loopback", name)
continue
}
// Not a lot else to go on...
if !strings.HasPrefix(name, "eth") {
glog.V(2).Infof("Ignoring interface %s - name does not look like ethernet device", name)
continue
}
addrs, err := networkInterface.Addrs()
if err != nil {
return nil, fmt.Errorf("error querying network interface %s for IP adddresses: %v", name, err)
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("error parsing address %s on network interface %s: %v", addr.String(), name, err)
}
if ip.IsLoopback() {
glog.V(2).Infof("Ignoring address %s (loopback)", ip)
continue
}
if ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() {
glog.V(2).Infof("Ignoring address %s (link-local)", ip)
continue
}
ips = append(ips, ip)
}
}
if len(ips) == 0 {
return nil, fmt.Errorf("unable to determine pod ip (no adddresses found)")
}
if len(ips) != 1 {
glog.Warningf("Found multiple pod IPs; making arbitrary choice")
for _, ip := range ips {
glog.Warningf("\tip: %s", ip.String())
}
}
return ips[0], nil
}
<file_sep>package grpc
import (
"fmt"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"net/url"
"strings"
)
const MetadataKeyToken = "token"
const MetadataKeyUsername = "user"
const MetadataKeyPassword = "password"
type GRPCClientOptions struct {
Server string
Token string
Username string
Password string
}
type tokenCreds struct {
Token string
}
func (c *tokenCreds) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
return map[string]string{
MetadataKeyToken: c.Token,
}, nil
}
func (c *tokenCreds) RequireTransportSecurity() bool {
return true
}
type basicAuthCreds struct {
Username string
Password string
}
func (c *basicAuthCreds) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
return map[string]string{
MetadataKeyUsername: c.Username,
MetadataKeyPassword: c.Password,
}, nil
}
func (c *basicAuthCreds) RequireTransportSecurity() bool {
return true
}
func NewGRPCClient(options *GRPCClientOptions) (*grpc.ClientConn, error) {
u, err := url.Parse(options.Server)
if err != nil {
return nil, fmt.Errorf("Invalid server url %q", options.Server)
}
var opts []grpc.DialOption
if u.Scheme == "http" {
opts = append(opts, grpc.WithInsecure())
} else if u.Scheme == "https" {
// TODO: Unclear if we need to set this. Feels prudent!
sn := u.Host
colonIndex := strings.Index(sn, ":")
if colonIndex != -1 {
sn = sn[:colonIndex]
}
var creds credentials.TransportCredentials
//if *caFile != "" {
// var err error
// creds, err = credentials.NewClientTLSFromFile(*caFile, sn)
// if err != nil {
// grpclog.Fatalf("Failed to create TLS credentials %v", err)
// }
//} else {
// creds = credentials.NewClientTLSFromCert(nil, sn)
//}
creds = credentials.NewClientTLSFromCert(nil, sn)
opts = append(opts, grpc.WithTransportCredentials(creds))
} else {
return nil, fmt.Errorf("unknown scheme %q", u.Scheme)
}
if options.Token != "" {
opts = append(opts, grpc.WithPerRPCCredentials(&tokenCreds{Token: options.Token}))
} else if options.Username != "" {
opts = append(opts, grpc.WithPerRPCCredentials(&basicAuthCreds{Username: options.Username, Password: options.Password}))
}
conn, err := grpc.Dial(u.Host, opts...)
if err != nil {
return nil, fmt.Errorf("failed to connect to server %q: %v", u.Host, err)
}
return conn, nil
}
<file_sep>package logspoke
import (
"fmt"
"github.com/golang/glog"
"io/ioutil"
"kope.io/klogs/pkg/archive"
"kope.io/klogs/pkg/archive/s3archive"
"net/url"
"strings"
)
type Options struct {
PodDir string
ContainerDir string
ArchiveSink string
Listen string
JoinHub string
NodeName string
}
func (o *Options) SetDefaults() {
o.PodDir = "/var/lib/kubelet/pods"
o.ContainerDir = "/var/lib/docker/containers"
o.Listen = "http://:7777"
o.NodeName = "@/etc/hostname"
}
type LogShipper struct {
scraper *Scraper
logServer *LogServer
meshMember *MeshMember
}
func NewLogShipper(options *Options) (*LogShipper, error) {
l := &LogShipper{}
if strings.HasPrefix(options.NodeName, "@") {
f := options.NodeName[1:]
nodeName, err := ioutil.ReadFile(f)
if err != nil {
return nil, fmt.Errorf("error reading node name from %q: %v", f, err)
}
options.NodeName = strings.TrimSpace(string(nodeName))
glog.Infof("Read hostname from %q: %q", f, options.NodeName)
}
var archiveSink archive.Sink
if options.ArchiveSink != "" {
u, err := url.Parse(options.ArchiveSink)
if err != nil {
return nil, fmt.Errorf("invalid ArchiveSink %q: %q", options.ArchiveSink, err)
}
if u.Scheme == "s3" {
archiveSink, err = s3archive.NewSink(u)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("unknown scheme in ArchivePath %q", options.ArchiveSink)
}
}
nodeState := newNodeState(archiveSink)
logServer, err := newLogServer(options, nodeState)
if err != nil {
return nil, err
}
l.logServer = logServer
scraper, err := newScraper(options, nodeState)
if err != nil {
return nil, err
}
l.scraper = scraper
if options.JoinHub != "" {
l.meshMember, err = newMeshMember(options)
if err != nil {
return nil, err
}
}
return l, nil
}
func (l *LogShipper) Run() error {
go l.scraper.Run()
if l.meshMember != nil {
go l.meshMember.Run()
}
return l.logServer.Run()
}
<file_sep>package logspoke
import (
"bufio"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"github.com/golang/glog"
"io"
"k8s.io/client-go/pkg/api/v1"
"kope.io/klogs/pkg/archive"
"kope.io/klogs/pkg/proto"
"math"
"os"
"strings"
"sync"
"time"
)
const LineBufferSize = 1024 * 1024
const chunkFlushSize = 64 * 1024
var idlePeriod = time.Minute * 15
type NodeState struct {
nodeFields *proto.Fields
archiveSink archive.Sink
mutex sync.Mutex
pods map[string]*PodState
containers map[string]*ContainerState
}
type PodState struct {
nodeState *NodeState
uid string
mutex sync.Mutex
streamInfo proto.StreamInfo
podObject *v1.Pod
logs *LogsState
}
type ContainerState struct {
nodeState *NodeState
id string
mutex sync.Mutex
streamInfo proto.StreamInfo
logs *LogsState
labels map[string]string
}
type LogsState struct {
mutex sync.Mutex
logs map[string]*LogFile
archived map[string]*LogFile
}
type LogFile struct {
model proto.LogFile
}
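// canMatch reports whether this log file could contain lines matching the request, using its per-file fields and max timestamp; it also returns the filters that still have to be checked line by line.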
func (l *LogFile) canMatch(request *proto.SearchRequest) (bool, []*proto.FieldFilter) {
if len(request.FieldFilters) == 0 {
return true, nil
}
unmatched := make([]*proto.FieldFilter, 0, len(request.FieldFilters))
for _, filter := range request.FieldFilters {
mismatch := false
processed := false
// TODO: Well known fields
if filter.Key == "@timestamp" {
// TODO: Non-string values
t, err := time.Parse(time.RFC3339Nano, filter.Value)
if err != nil {
glog.Warningf("ignoring error parsing @timestamp value %q", filter.Value)
continue
}
switch filter.Op {
case proto.FieldFilterOperator_GTE:
if l.model.MaxTimestamp != 0 && l.model.MaxTimestamp < uint64(t.UnixNano()) {
mismatch = true
}
default:
glog.Warningf("Unhandled operator: %v", filter)
}
} else {
for _, actual := range l.model.Fields.Fields {
if filter.Key == actual.Key {
processed = true
switch filter.Op {
case proto.FieldFilterOperator_NOT_EQ:
if actual.Value == filter.Value {
mismatch = true
}
case proto.FieldFilterOperator_EQ:
if actual.Value != filter.Value {
mismatch = true
}
default:
glog.Warningf("Unhandled operator: %v", filter)
}
break
}
}
}
if mismatch {
return false, nil
}
if !processed {
unmatched = append(unmatched, filter)
}
}
return true, unmatched
}
func newNodeState(archiveSink archive.Sink) *NodeState {
s := &NodeState{
archiveSink: archiveSink,
pods: make(map[string]*PodState),
containers: make(map[string]*ContainerState),
}
return s
}
func (s *NodeState) CleanupPodLogs(ids []string) {
idMap := make(map[string]struct{}, len(ids))
for _, k := range ids {
idMap[k] = struct{}{}
}
s.mutex.Lock()
defer s.mutex.Unlock()
for _, p := range s.pods {
func() {
p.mutex.Lock()
defer p.mutex.Unlock()
_, found := idMap[p.uid]
if !found {
glog.V(2).Infof("Removing pod logs state: %q", p.uid)
p.logs = nil
glog.Warningf("TODO: Remove pods when no state left")
}
}()
}
}
func (s *NodeState) CleanupContainerLogs(ids []string) {
idMap := make(map[string]struct{}, len(ids))
for _, k := range ids {
idMap[k] = struct{}{}
}
s.mutex.Lock()
defer s.mutex.Unlock()
for _, p := range s.containers {
func() {
p.mutex.Lock()
defer p.mutex.Unlock()
_, found := idMap[p.id]
if !found {
glog.V(2).Infof("Removing container logs state: %q", p.id)
p.logs = nil
glog.Warningf("TODO: Remove containers when no state left")
}
}()
}
}
func (s *NodeState) GetPodState(uid string) *PodState {
s.mutex.Lock()
defer s.mutex.Unlock()
pod := s.pods[uid]
if pod == nil {
pod = &PodState{
nodeState: s,
uid: uid,
}
pod.streamInfo.PodUid = uid
s.pods[uid] = pod
}
return pod
}
func (s *NodeState) GetContainerState(containerid string) *ContainerState {
s.mutex.Lock()
defer s.mutex.Unlock()
container := s.containers[containerid]
if container == nil {
container = &ContainerState{
nodeState: s,
id: containerid,
}
s.containers[containerid] = container
}
return container
}
func newLogsState() *LogsState {
l := &LogsState{
logs: make(map[string]*LogFile),
archived: make(map[string]*LogFile),
}
return l
}
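// foundFile records or refreshes the state for a discovered log file, recomputing its maximum timestamp when the size or modification time has changed.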
func (l *LogsState) foundFile(sourcePath string, relativePath string, stat os.FileInfo, fields *proto.Fields) error {
l.mutex.Lock()
defer l.mutex.Unlock()
modTime := stat.ModTime()
logFile := l.logs[sourcePath]
modified := true
if logFile != nil {
if logFile.model.LastModified == modTime.Unix() && logFile.model.Size == stat.Size() {
modified = false
glog.V(4).Infof("File not modified: %q", sourcePath)
}
} else {
logFile = &LogFile{
model: proto.LogFile{
Path: relativePath,
LastModified: modTime.Unix(),
Size: stat.Size(),
Fields: fields,
},
}
l.logs[sourcePath] = logFile
}
if modified {
_, maxTimestamp, err := findMaxTimestamp(sourcePath)
if err != nil {
glog.Warningf("error finding max timestamp for %q: %v", sourcePath, err)
logFile.model.MaxTimestamp = 0
} else {
logFile.model.MaxTimestamp = uint64(maxTimestamp)
}
}
return nil
}
func (p *ContainerState) foundFile(sourcePath string, relativePath string, stat os.FileInfo, fields *proto.Fields) error {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.logs == nil {
p.logs = newLogsState()
}
return p.logs.foundFile(sourcePath, relativePath, stat, fields)
}
func (p *PodState) foundFile(sourcePath string, relativePath string, stat os.FileInfo, fields *proto.Fields) error {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.logs == nil {
p.logs = newLogsState()
}
// TODO: Move to shared LogState.foundFile code; move archiving elsewhere
modTime := stat.ModTime()
now := time.Now()
if modTime.Add(idlePeriod).Before(now) {
logFile := p.logs.logs[sourcePath]
modified := false
if logFile != nil {
if logFile.model.LastModified == modTime.Unix() && logFile.model.Size == stat.Size() {
glog.V(4).Infof("File not modified: %q", sourcePath)
} else {
modified = true
}
} else {
logFile = &LogFile{
model: proto.LogFile{
Path: relativePath,
LastModified: modTime.Unix(),
Size: stat.Size(),
Fields: fields,
},
}
p.logs.logs[sourcePath] = logFile
modified = true
}
if modified && p.nodeState.archiveSink != nil {
archived := p.logs.archived[relativePath]
if archived == nil || *archived != *logFile {
glog.Warningf("Should not hold lock while archiving file")
err := p.nodeState.archiveSink.AddToArchive(sourcePath, p.uid, &logFile.model)
if err != nil {
glog.Warningf("error adding file %q to archive: %v", sourcePath, err)
} else {
p.logs.archived[relativePath] = logFile
}
}
}
}
return nil
}
var _ proto.LogServerServer = &NodeState{}
func (s *NodeState) GetStreams(request *proto.GetStreamsRequest, out proto.LogServer_GetStreamsServer) error {
s.mutex.Lock()
defer s.mutex.Unlock()
glog.V(2).Infof("GetStreamsRequest %q", request)
for _, p := range s.pods {
err := func() error {
p.mutex.Lock()
defer p.mutex.Unlock()
err := out.Send(&p.streamInfo)
if err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
}
for _, p := range s.containers {
err := func() error {
p.mutex.Lock()
defer p.mutex.Unlock()
err := out.Send(&p.streamInfo)
if err != nil {
return err
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
type fileScanOperation struct {
sourcePath string
fields *proto.Fields
unmatched []*proto.FieldFilter
}
func (s *NodeState) Search(request *proto.SearchRequest, out proto.LogServer_SearchServer) error {
glog.Warningf("TODO: Scan files before search?")
var ops []*fileScanOperation
func() {
s.mutex.Lock()
defer s.mutex.Unlock()
glog.V(2).Infof("Search %q", request)
for _, p := range s.pods {
func() {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.logs != nil {
for k, l := range p.logs.logs {
canMatch, unmatched := l.canMatch(request)
if !canMatch {
continue
}
ops = append(ops, &fileScanOperation{
sourcePath: k,
fields: l.model.Fields,
unmatched: unmatched,
})
}
}
}()
}
for _, p := range s.containers {
func() {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.logs != nil {
for k, l := range p.logs.logs {
canMatch, unmatched := l.canMatch(request)
if !canMatch {
glog.V(2).Infof("Excluded file %s size=%d maxTimestamp=%d %v", k, l.model.Size, l.model.MaxTimestamp, l.model.Fields)
continue
}
// TODO: Skip if size 0? ... maybe only if file is "closed"
glog.V(2).Infof("Unable to exclude file %s size=%d maxTimestamp=%d %v", k, l.model.Size, l.model.MaxTimestamp, l.model.Fields)
ops = append(ops, &fileScanOperation{
sourcePath: k,
fields: l.model.Fields,
unmatched: unmatched,
})
}
}
}()
}
}()
// TODO: Build callback class
var matchBytes []byte
if request.Contains != "" {
glog.Warningf("JSON match encoding not yet implemented")
matchBytes = []byte(request.Contains)
}
matcher := func(line []byte) bool {
if matchBytes != nil {
if bytes.Index(line, matchBytes) == -1 {
return false
}
}
return true
}
buffer := make([]byte, LineBufferSize, LineBufferSize)
for _, l := range ops {
err := l.searchLogFile(buffer, matcher, request, out)
if err != nil {
glog.Warningf("error searching log file %q: %v", l, err)
return fmt.Errorf("error searching log file %q: %v", l, err)
}
}
return nil
}
type dockerLine struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Time string `json:"time,omitempty"`
}
func (s *fileScanOperation) searchLogFile(buffer []byte, matcher func([]byte) bool, request *proto.SearchRequest, out proto.LogServer_SearchServer) error {
glog.V(2).Infof("search log file %q: %v", s.sourcePath, s.unmatched)
// TODO: Skip if size 0?
var in io.Reader
f, err := os.OpenFile(s.sourcePath, os.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
glog.V(2).Infof("ignoring log file that no longer exists %q", s.sourcePath)
return nil
} else {
glog.Warningf("ignoring error opening log file %q: %v", s.sourcePath, err)
return nil
}
}
defer f.Close()
in = f
if strings.HasSuffix(s.sourcePath, ".gz") {
gz, err := gzip.NewReader(in)
if err != nil {
return fmt.Errorf("error building gzip decompressor for %q: %v", s.sourcePath, err)
}
defer gz.Close()
in = gz
}
var chunk *proto.SearchResultChunk
var chunkSize int
scanner := bufio.NewScanner(in)
scanner.Buffer(buffer, cap(buffer))
for scanner.Scan() {
line := scanner.Bytes()
if !matcher(line) {
continue
}
if chunk == nil {
chunk = &proto.SearchResultChunk{}
chunkSize = 32
chunk.CommonFields = s.fields
}
item := &proto.SearchResult{}
item.Raw = line
var l dockerLine
err = json.Unmarshal(line, &l)
if err == nil {
fields := item.Fields
if fields == nil {
fields = &proto.Fields{}
item.Fields = fields
}
if l.Log != "" {
fields.Fields = append(fields.Fields, &proto.Field{
Key: "log",
Value: l.Log,
})
chunkSize += 8 + len(l.Log)
}
if l.Stream != "" {
fields.Fields = append(fields.Fields, &proto.Field{
Key: "stream",
Value: l.Stream,
})
chunkSize += 8 + len(l.Stream)
}
if l.Time != "" {
t, err := time.Parse(time.RFC3339Nano, l.Time)
if err == nil {
item.Timestamp = uint64(t.UnixNano())
}
chunkSize += 10
}
}
if len(s.unmatched) != 0 {
itemFields := item.Fields
if itemFields == nil {
continue
}
match := true
for _, filter := range s.unmatched {
// TODO: Well known fields
if filter.Key == "@timestamp" {
// TODO: What if no timestamp?
// TODO: Non-string values
t, err := time.Parse(time.RFC3339Nano, filter.Value)
if err != nil {
glog.Warningf("ignoring error parsing @timestamp value %q", filter.Value)
continue
}
switch filter.Op {
case proto.FieldFilterOperator_GTE:
if !(item.Timestamp >= uint64(t.UnixNano())) {
match = false
}
default:
glog.Warningf("Unhandled operator: %v", filter)
}
} else {
found := false
for _, actual := range itemFields.Fields {
if actual.Key == filter.Key {
found = true
switch filter.Op {
case proto.FieldFilterOperator_NOT_EQ:
if actual.Value == filter.Value {
match = false
}
case proto.FieldFilterOperator_EQ:
if actual.Value != filter.Value {
match = false
}
default:
glog.Warningf("Unhandled operator: %v", filter)
}
break
}
}
if !found {
match = false
}
}
if !match {
break
}
}
if !match {
continue
}
}
chunk.Items = append(chunk.Items, item)
chunkSize += 8 + len(line)
if chunkSize > chunkFlushSize {
if err := out.Send(chunk); err != nil {
return err
}
chunk = nil
}
}
if chunk != nil {
if err := out.Send(chunk); err != nil {
return err
}
}
if err := scanner.Err(); err != nil {
glog.Warningf("error reading log file %q: %v", s.sourcePath, err)
}
return nil
}
func findMaxTimestamp(sourcePath string) (uint64, uint64, error) {
buffer := make([]byte, LineBufferSize, LineBufferSize)
glog.V(2).Infof("findMaxTimestamp for %q", sourcePath)
var in io.Reader
f, err := os.OpenFile(sourcePath, os.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
glog.V(2).Infof("ignoring log file that no longer exists %q", sourcePath)
return 0, 0, err
} else {
return 0, 0, err
}
}
defer f.Close()
// TODO: rotate, attach metadata
glog.Warningf("findMaxTimestamp is very inefficient")
in = f
if strings.HasSuffix(sourcePath, ".gz") {
gz, err := gzip.NewReader(in)
if err != nil {
return 0, 0, fmt.Errorf("error building gzip decompressor for %q: %v", sourcePath, err)
}
defer gz.Close()
in = gz
}
minTimestamp := uint64(math.MaxUint64)
maxTimestamp := uint64(0)
scanner := bufio.NewScanner(in)
scanner.Buffer(buffer, cap(buffer))
for scanner.Scan() {
line := scanner.Bytes()
// TODO: Don't bother parsing unless we are at the end?
var l dockerLine
err = json.Unmarshal(line, &l)
if err == nil {
if l.Time != "" {
t, err := time.Parse(time.RFC3339Nano, l.Time)
if err == nil {
ts := uint64(t.UnixNano())
if ts > maxTimestamp {
maxTimestamp = ts
}
if ts < minTimestamp {
minTimestamp = ts
}
}
}
}
}
if err := scanner.Err(); err != nil {
return 0, 0, fmt.Errorf("error reading log file %q: %v", sourcePath, err)
}
return minTimestamp, maxTimestamp, nil
}
| 414900900b757c157a75c503dbe85288850d10c2 | ["Markdown", "Go", "Makefile"] | 30 | Go | kopeio/klogs | f8dbb27bd89693ad9c1d634066dbdb5ca66fe07d | 9bb72f27c6fd9b64525736dc87446cc20b33bfc9 | refs/heads/master |
<file_sep>import React from 'react';
import './PetListItem.css';
function PetListItem(props) {
const {
imageURL,
name,
age,
breed,
gender,
description,
story,
} = props.petObj || {};
function renderPet() {
if (props.petObj) {
return (
<>
<img src={imageURL} alt={description}></img>
<ul>
<li>Name: {name}</li>
<li>Breed: {breed}</li>
<li>Age: {age}</li>
<li>Gender: {gender}</li>
<li>Description: {description}</li>
<li>Story: {story}</li>
</ul>
</>
);
}
return null;
}
return <>{renderPet()}</>;
}
export default PetListItem;
<file_sep>import React, { useState } from 'react';
import { Link } from 'react-router-dom';
import { slide as Menu } from 'react-burger-menu';
import './VeganburgerMenu.css';
function VeganburgerMenu(props) {
const [isOpen, setIsOpen] = useState(false);
function closeMenu() {
setIsOpen(false);
}
function isMenuOpen(state) {
if (state.isOpen === isOpen) {
return;
}
setIsOpen(state.isOpen);
}
return (
<Menu
isOpen={isOpen}
onStateChange={isMenuOpen}
right>
<Link
className='menu-item'
onClick={closeMenu}
to='/'>
Home
</Link>
<Link
className='menu-item'
onClick={closeMenu}
to='/adopt'>
Adopt
</Link>
</Menu>
);
}
export default VeganburgerMenu;
<file_sep>import React from 'react';
import './Home.css';
function Home(props) {
function getInLine() {
props.history.push('/adopt');
}
return (
<div id='Home'>
<img src='images/happy-cat.jpg' alt='Cat smiling'></img>
<h3>Welcome to Pet Agree!</h3>
<p>
There is a unique adoption process for our clients. Anyone wishing to
adopt must enter a queue. Upon reaching the front of the line, you will
be able to see a description of a cat and dog, along with a picture of
each. You may choose both, or just one, but in either case, you are only
adopting the one that is at the front of the queue. Once you've chosen
your new furry friend, you will receive a notification alerting you that
the adoption was successful. You then leave the queue and the person
behind you may select a pet.
</p>
<p>We do not sell leashes or treats.</p>
<button onClick={getInLine}>Get in line</button>
</div>
);
}
export default Home;
<file_sep>## **Pet Agree**
A simulated pet-adoption website: get in line and take Scruffy home!
## Live app
https://pet-agree.vercel.app/
## Screenshots

## Client repo
https://github.com/thinkful-ei-panda/DSA-Petful-Nick-Chatchawan-Client.git
## API repo
https://github.com/thinkful-ei-panda/DSA-Petful-Nick-Chatchawan-Server.git
## Collaborators
<NAME>
<NAME>
## Summary
This web app allows users to see which pets are available for adoption at the front of the adoption queue.
## Technology
JavaScript
HTML
CSS
React.js
Express
Node.js
Affinity Designer
Vercel
Heroku
DSA - Singly-linked-list as Queue
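## Queue sketch (illustrative)
The adoption line is backed by a singly linked list used as a queue (see the DSA entry just above). The client/server code for it is not included here, so the snippet below is only a minimal JavaScript sketch of that structure; the `Queue`, `enqueue`, and `dequeue` names are illustrative assumptions, not the project's actual API.
```js
// Minimal singly-linked-list queue (illustrative sketch, not the project's code).
class Node {
  constructor(value) {
    this.value = value; // item stored at this position in line
    this.next = null;   // link to the next node
  }
}

class Queue {
  constructor() {
    this.first = null; // front of the line
    this.last = null;  // back of the line
  }

  // Add a value to the back of the queue.
  enqueue(value) {
    const node = new Node(value);
    if (this.last) {
      this.last.next = node;
    } else {
      this.first = node; // queue was empty
    }
    this.last = node;
  }

  // Remove and return the value at the front, or null if the queue is empty.
  dequeue() {
    if (!this.first) return null;
    const node = this.first;
    this.first = node.next;
    if (!this.first) this.last = null;
    return node.value;
  }
}

// People waiting to adopt are served first-in, first-out.
const line = new Queue();
line.enqueue('first adopter');
line.enqueue('second adopter');
console.log(line.dequeue()); // 'first adopter'
```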
|
c3ac48d1b4843a726dafac457b321c9d491abad6
|
[
"JavaScript",
"Markdown"
] | 4 |
JavaScript
|
jenthura/DSA-Petful-Nick-Chatchawan-Client
|
806c21d06db2bc3079926172c127fdc4b314a363
|
46bf77aa813965de0a6173b13ab4c5a651f9db18
|
refs/heads/main
|
<file_sep>import { Dispatch } from 'redux'
import supportedExtensions from '../../../constants/supportedExtentions'
import FileViewerStructure from '../../../types/FileViewerStructure'
import { addActiveFile, setEditorAciveFile } from '../../reducers/files/reducer'
import { RootState } from '../../store'
const openFile = (node: FileViewerStructure) => (dispatch: Dispatch, getState: () => RootState) => {
const { extension: fileExtension = '', id: fileId, children } = node
if (children || !supportedExtensions[fileExtension]) {
return
}
const state = getState()
const activeFiles = state.files.activeFiles
if (!activeFiles.includes(fileId)) {
dispatch(addActiveFile(fileId))
}
dispatch(setEditorAciveFile(fileId))
}
export default openFile
<file_sep>import { Dispatch } from 'redux'
import { removeActiveFile, setEditorAciveFile } from '../../reducers/files/reducer'
import { RootState } from '../../store'
const getNewActiveFile = (activeFileIds: string[], activeFilesLength: number, fileId: string) => {
const fileToBeRemovedIndex = activeFileIds.indexOf(fileId)
if (fileToBeRemovedIndex + 1 === activeFilesLength) {
return activeFileIds[fileToBeRemovedIndex - 1]
}
return activeFileIds[fileToBeRemovedIndex + 1]
}
const closeFile = (fileId: string) => (dispatch: Dispatch, getState: () => RootState) => {
const state = getState()
const { activeFiles, editorActiveFile } = state.files
const activeFilesLength = activeFiles.length
if (activeFilesLength >= 2) {
const newActiveFileId = getNewActiveFile(activeFiles, activeFilesLength, fileId)
if (editorActiveFile === fileId || editorActiveFile === newActiveFileId) {
dispatch(setEditorAciveFile(newActiveFileId))
}
} else {
dispatch(setEditorAciveFile(null))
}
dispatch(removeActiveFile(fileId))
}
export default closeFile
<file_sep>const supportedExtensions: { [key: string]: string } = {
js: 'javascript',
jsx: 'javascript',
ts: 'typescript',
tsx: 'typescript',
py: 'python',
rb: 'ruby',
java: 'java',
go: 'go',
html: 'html',
php: 'php',
css: 'css',
json: 'json',
}
export default supportedExtensions
<file_sep>export default interface FileViewerStructure {
id: string
name: string
extension?: string
children?: FileViewerStructure[]
}
<file_sep>export default interface UserFile {
id: string
name: string
relativePath: string
code: string
extension: string
}
<file_sep>import { combineReducers } from 'redux'
import darkModeReducer from './reducers/dark-mode/reducer'
import filesReducer from './reducers/files/reducer'
const rootReducer = combineReducers({
darkMode: darkModeReducer,
files: filesReducer,
})
export default rootReducer
<file_sep>import { configureStore, getDefaultMiddleware } from '@reduxjs/toolkit'
import { FLUSH, REHYDRATE, PAUSE, PERSIST, PURGE, REGISTER } from 'redux-persist/es/constants'
import persistStore from 'redux-persist/es/persistStore'
import persistedReducer from './persistConfig'
const ignoredActions = [FLUSH, REHYDRATE, PAUSE, PERSIST, PURGE, REGISTER]
const store = configureStore({
reducer: persistedReducer,
middleware: getDefaultMiddleware({
serializableCheck: {
ignoredActions,
},
}),
devTools: process.env.NODE_ENV !== 'production',
})
export const persistor = persistStore(store)
export type RootState = ReturnType<typeof store.getState>
export type AppDispatch = typeof store.dispatch
export default store
<file_sep>import { createSlice } from '@reduxjs/toolkit'
export const initialState = false
const darkModeSlice = createSlice({
name: 'darkMode',
initialState,
reducers: {
toggleDarkMode(state) {
return !state
},
},
})
export const { toggleDarkMode } = darkModeSlice.actions
const darkModeReducer = darkModeSlice.reducer
export default darkModeReducer
<file_sep>export type CustomFile = Partial<File> &
Blob & {
webkitRelativePath?: string
}
<file_sep>import persistReducer from 'redux-persist/es/persistReducer'
import storage from 'redux-persist/lib/storage'
import rootReducer from './rootReducer'
const persistConfig = {
key: 'root',
storage,
whitelist: ['darkMode'],
}
const persistedReducer = persistReducer(persistConfig, rootReducer)
export default persistedReducer
|
b089c33f66316259dd14ff101b20116eea0f7a91
|
[
"TypeScript"
] | 10 |
TypeScript
|
mez32/TS-Code-Editor
|
ad189d6ade70625a6e86a1eebff3167beedc258d
|
546e9ede0dbcf143dc787485f7a7b8456754605b
|
refs/heads/master
|
<file_sep>const companies = [
{name: "Company One", category: "Finance", start: 1981, end: 2003},
{name: "Company Two", category: "Retail", start: 1992, end: 2008},
{name: "Company Three", category: "Auto", start: 1999, end: 2007},
{name: "Company Four", category: "Retail", start: 1989, end: 2010},
{name: "Company Five", category: "Technology", start: 2009, end: 2014},
{name: "Company Six", category: "Finance", start: 1987, end: 2010},
{name: "Company Seven", category: "Auto", start: 1986, end: 1996},
{name: "Company Eight", category: "Technology", start: 2011, end: 2016},
{name: "Company Nine", category: "Retail", start: 1981, end: 1989}
];
const ages = [33, 12, 20, 16, 5, 54, 21, 44, 61, 13, 15, 45, 25, 64, 32];
//Foreach looping
// companies.forEach(function (company,index) {
// console.log(company);
// console.log(index);
// });
// companies.forEach(company => console.log(company));
//Filter
// const Teenages = ages.filter(function (age) {
// if(age > 20) return true;
// });
// const Teenages = ages.filter(age => age > 20)
// console.log(Teenages);
// const old_companies = companies.filter(company => (company.end - company.start) >= 10 )
// console.log(old_companies)
//Map
// const DoubleAges = ages.map(age => age * 2);
// const DoubleAges = ages
// .map(age => age / 2)
// .map(age => Math.pow(age,2));
// console.log(DoubleAges);
//Sort
// const SortAges = ages.sort((a,b) => a > b ? 1 : -1);
// console.log(SortAges);
//Reduce
// const AgeSum = ages.reduce((total,age) => total+age,0);
// console.log(AgeSum);<file_sep># JS-Tricks-Hacks
This repository demonstrates basic JavaScript tricks.
## Topics Covered
Here are some basic array methods (forEach, filter, map, sort, and reduce) that help build a good foundation for your JavaScript skills; a short runnable example follows below.
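The companion script in this repo exercises these methods on a `companies` array and an `ages` array, with most of the calls commented out. Below is a small runnable recap of the same methods on the same `ages` data; it is only a sketch of what the script demonstrates, not part of the script itself.
```js
// Runnable recap of forEach/filter/map/sort/reduce from the companion script.
const ages = [33, 12, 20, 16, 5, 54, 21, 44, 61, 13, 15, 45, 25, 64, 32];

// forEach: visit every element
ages.forEach(age => console.log(age));

// filter: keep only ages over 20
const over20 = ages.filter(age => age > 20);

// map: double every age
const doubled = ages.map(age => age * 2);

// sort: ascending numeric order (copy first so the original array is not mutated)
const sorted = [...ages].sort((a, b) => a - b);

// reduce: sum of all ages
const total = ages.reduce((sum, age) => sum + age, 0);

console.log(over20, doubled, sorted, total);
```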
|
b336bb40d320b3229d4cbfa73370aa0da0584199
|
[
"JavaScript",
"Markdown"
] | 2 |
JavaScript
|
DEV-VJ/JS-Tricks-Hacks
|
0dc27d5d98ae98ad08062100a79f22df6d4f8ae4
|
76050acfe17a5e5017fa337ba43100fdba1f32f2
|
refs/heads/master
|
<repo_name>ChinhDuong/dqc-lib-source-link<file_sep>/dqc.lib/dqc.lib/Operation.cs
using System;
namespace dqc.lib
{
public class Operation
{
public int Add(int operand1, int operand2)
{
return operand1 + operand2;
}
}
}
|
4d917167abd41c0fe9cb4b4d5d9f9dc9e28e154e
|
[
"C#"
] | 1 |
C#
|
ChinhDuong/dqc-lib-source-link
|
a23bbd91982aad7cc85e0e96a63407c0e4c6f316
|
03d3c350e5680439aafbcffb702169a0511decfc
|
refs/heads/master
|
<repo_name>Tanguy-L/noidea-front<file_sep>/.env.example
BASE_URL="http://172.16.17.32:3000"
<file_sep>/.d.ts
declare module 'vue-datepicker-local'
<file_sep>/nuxt.config.js
require("dotenv").config();
export default {
mode: "universal",
/*
** Headers of the page
*/
head: {
title: process.env.npm_package_name || "",
meta: [
{ charset: "utf-8" },
{ name: "viewport", content: "width=device-width, initial-scale=1" },
{
hid: "description",
name: "description",
content: process.env.npm_package_description || ""
}
],
link: [
{ rel: "icon", type: "image/x-icon", href: "/favicon.ico" },
{
rel: "stylesheet",
href:
"https://fonts.googleapis.com/css?family=Dosis|Montserrat&display=swap"
},
{
rel: "stylesheet",
href: "https://fonts.googleapis.com/icon?family=Material+Icons"
}
]
},
/*
** Customize the progress-bar color
*/
loading: { color: "#fff" },
/*
** Global CSS
*/
css: ["./assets/main.css", "./assets/utilities.css"],
/*
** Plugins to load before mounting the App
*/
plugins: [{ src: "~/plugins/vue-datepicker.js", ssr: false }],
env: {
baseUrl: process.env.BASE_URL || "http://localhost:3000"
},
server: {
    port: 8081, // default: 3000
    host: "0.0.0.0" // default: localhost
},
watchers: {
webpack: {
poll: true
}
},
modules: ["@nuxtjs/axios", "nuxt-socket-io"],
io: {
sockets: [
{
name: "home",
url: "http://localhost:5000",
default: true,
vuex: {
// optional
mutations: [{ message: "SET_STATUS" }]
}
}
]
},
axios: {
host: "http://172.23.0.3",
port: 3000
}
};
<file_sep>/plugins/vue-datepicker.js
import Datepicker from "vue-datepicker-local";
import Vue from "vue";
Vue.use(Datepicker, { name: "vue-datepicker-local" });
|
6cf31b3ecf2cc3be32900ae79ef2c17a78c93420
|
[
"JavaScript",
"TypeScript",
"Shell"
] | 4 |
Shell
|
Tanguy-L/noidea-front
|
36c3deaea3e37b6c41a143928516a946aa4aa513
|
7467b55885d139620a64ff6c743a786523eb5b78
|
refs/heads/master
|
<file_sep>ServiceManagerID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
TransportName=TCP
PeerTypeId=HARDWARE_SERVER
UserName=v
Port=3121
Host=127.0.0.1
OSName=Linux
ID=Local
AgentID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
Name=Local
ServiceManagerID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
TransportName=TCP
PeerTypeId=LINUX_TCF_AGENT
UserName=v
Port=1534
Host=192.168.0.1
OSName=Linux
ID=Linux Agent
AgentID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
Name=Linux Agent
ServiceManagerID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
TransportName=TCP
PeerTypeId=QEMU_TCF_GDB_CLIENT
UserName=v
Port=1138
Host=127.0.0.1
OSName=Linux
ID=QEMU
AgentID=4dc8a1f7-4ab6-4000-882c-5cb8a23acf21
Name=QEMU
<file_sep>
//#include "ps7_init.h"
//#include "platform.h"
#include "xil_io.h"
#include "xscugic.h"
#include "xparameters.h"
// usleep(miliseconds);
#include <unistd.h>
// malloc
#include <stdlib.h>
// strlen
#include <string.h>
#include <stdio.h>
//FF
#include "xsdps.h"
#include "xil_printf.h"
#include "ff.h"
#include "xil_cache.h"
//~FF
XScuGic InterruptController;
static XScuGic_Config *GicConfig;
u32 global_counter = 0;
//FF
static FATFS FS_instance; // File System instance
static FIL file1; // File instance
FRESULT result; // FRESULT variable
static char FileName[32] = "BYTE.txt"; // name of the log
static char *Log_File; // pointer to the log
char *Path = "0:/home/"; // string pointer to the logical drive number
unsigned int BytesWr; // Bytes written
int len=0; // length of the string
int accum=0; // variable holding the EOF
u8 Buffer_logger[64] __attribute__ \
((aligned(32))); // Buffer should be word aligned (multiple of 4)
u32 Buffer_size = 64;
//~FF
char *location;
int ff_init(void)
{
// Mount SD Card and initialize device
result = f_mount(&FS_instance,Path, 1);
if(result != 0) return XST_FAILURE;
// Creating new file with read/write permissions
Log_File = (char *)FileName;
result = f_open(&file1, Log_File,
FA_CREATE_ALWAYS | FA_WRITE | FA_READ);
if(result!= 0) return XST_FAILURE;
return XST_SUCCESS;
}
int ff_print(const char *message, char *s)
{
sprintf(Buffer_logger,
message, s);
Log_File = (char *)FileName;
// Open file which was created
result = f_open(&file1, Log_File,FA_WRITE);
if(result!=0) return XST_FAILURE;
// Point to the end of log
result = f_lseek(&file1,accum);
if(result!=0) return XST_FAILURE;
// Write to log
result = f_write(&file1,
(const void*)Buffer_logger,
Buffer_size,&BytesWr);
if(result!=0) return XST_FAILURE;
// Increment file EOF pointer
len = strlen(Buffer_logger);
accum=accum+len;
//Close file.
result = f_close(&file1);
if(result!=0) return XST_FAILURE;
return 0;
}
///////////
// DMA //
///////////
int dma_init(void)
{
unsigned int tmp_value = 0;
//S2MM_DMACR.RS = 1
tmp_value = Xil_In32(XPAR_AXI_DMA_0_BASEADDR + 0x30);
ff_print("DMA: 1\n",0);
tmp_value = tmp_value | 0x1001;
ff_print("DMA: 2\n",0);
Xil_Out32(XPAR_AXI_DMA_0_BASEADDR + 0x30,tmp_value);
ff_print("DMA: 3\n",0);
tmp_value = Xil_In32(XPAR_AXI_DMA_0_BASEADDR + 0x30);
ff_print("DMA: initialize\n",0);
return 0;
}
void dma_transfer_start(unsigned int address, unsigned int length)
{
// S2MM destination address register (offset 0x48 in the AXI DMA register map)
Xil_Out32(XPAR_AXI_DMA_0_BASEADDR + 0x48,address);
// S2MM buffer length register (offset 0x58); writing the length starts the transfer
Xil_Out32(XPAR_AXI_DMA_0_BASEADDR + 0x58,length);
ff_print("DMA: transfer done!, %d\n",global_counter);
}
////////////////////////
// Sample generator //
////////////////////////
int sg_enable(unsigned int number_of_words)
{
//frame_size
Xil_Out32(XPAR_AXI_GPIO_0_BASEADDR + 0x8, number_of_words);
//enable
Xil_Out32(XPAR_AXI_GPIO_0_BASEADDR, 1);
//FF
ff_print("Sample generator: enable:%x\n",Xil_In32(XPAR_AXI_GPIO_0_BASEADDR));
ff_print("Sample generator: frame_size:%x\n",Xil_In32(XPAR_AXI_GPIO_0_BASEADDR+0x8));
//~FF
return 0;
}
//////////////////
// Interrupts //
//////////////////
void interrupt_handler(void)
{
u32 tmp_value = 0;
tmp_value = Xil_In32(XPAR_AXI_DMA_0_BASEADDR + 0x34);
ff_print("Interrupt: h1\n",0);
tmp_value = tmp_value | 0x1000;
ff_print("Interrupt: h2\n",0);
Xil_Out32(XPAR_AXI_DMA_0_BASEADDR + 0x34, tmp_value);
ff_print("Interrupt: h3\n",0);
global_counter ++;
dma_transfer_start(location, 256);
//FF
ff_print("Interrupt: handler, %d\n",global_counter);
//~FF
}
int interrupt_setup(XScuGic *XScuGicInstancePtr)
{
Xil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT,
(Xil_ExceptionHandler)XScuGic_InterruptHandler,
XScuGicInstancePtr);
Xil_ExceptionEnable();
//FF
ff_print("Interrupt: setup\n",0);
//~FF
return XST_SUCCESS;
}
int interrupt_init(u32 device_id)
{
int status = 0;
GicConfig = XScuGic_LookupConfig(device_id);
if(NULL == GicConfig) return XST_FAILURE;
status = XScuGic_CfgInitialize(&InterruptController,
GicConfig, GicConfig->CpuBaseAddress);
if(status != XST_SUCCESS) return XST_FAILURE;
status = interrupt_setup(&InterruptController);
if(status != XST_SUCCESS) return XST_FAILURE;
ff_print("Interrupt: i1\n",0);
status = XScuGic_Connect(&InterruptController,
XPAR_FABRIC_AXI_DMA_0_S2MM_INTROUT_INTR,
(Xil_ExceptionHandler)interrupt_handler,
NULL);
if(status != XST_SUCCESS) return XST_FAILURE;
ff_print("Interrupt: i2\n",0);
XScuGic_Enable(&InterruptController,
XPAR_FABRIC_AXI_DMA_0_S2MM_INTROUT_INTR);
ff_print("Interrupt: initialize\n",0);
return XST_SUCCESS;
}
int main()
{
ff_init();
ff_print("%s\n",0);
//init_platform();
ff_print("Platform: initialize %s\n",0);
// enable PL
//ps7_post_config();
//ps7_init();
// dma
dma_init();
// sample generator
sg_enable(32);
// interrupt
interrupt_init(XPAR_PS7_SCUGIC_0_DEVICE_ID);
// dma transfer
location = malloc(sizeof(char)*32*1024);
if(location)
{
ff_print("Malloc: done\n",0);
dma_transfer_start(location, 256);
}
else
{
ff_print("Malloc: ooops...\n",0);
}
for(int i = 0; i < 9; ++i)
ff_print("DMA: data: %x \n",Xil_In32(location + 0x4*i));
free(location);
return 0;
}
<file_sep># sample-generator-project
#
#
This project was created with Vivado v2017.2.
Keep this in mind (use the same version) if you want to recompile it.
|
46a571ff8be5a6643ba7e4a50c2cc6bd06eb34ee
|
[
"Markdown",
"C",
"INI"
] | 3 |
INI
|
VadymDenysenko/sample-generator-project
|
a7a8232ec98e7472e26452703c757ff6898a7bac
|
fc308767c87b225e83442740fc03fd7edb96a3f1
|
refs/heads/master
|
<repo_name>bndao/App-Dockerized<file_sep>/createAndRunContainer.sh
#!/bin/bash
if [ -d "$PWD/MONGODATABASEBACKUP/data" ] && [ -d "$PWD/data" ];
then
echo "Please choose one Version of data"
else
if [ -d "$PWD/MONGODATABASEBACKUP" ]
then
if [ -d "$PWD/data" ]
then
echo "done 1"; mv data MONGODATABASEBACKUP/; docker-compose up --build; mv MONGODATABASEBACKUP/data .; chmod 777 -R data; sh mountVol.sh
elif [ -d "$PWD/MONGODATABASEBACKUP/data" ]
then
echo "done 2";
docker-compose up --build;
mv MONGODATABASEBACKUP/data .;
chmod 777 -R data;
sh mountVol.sh;
else
echo "Please rm -rf MONGODATABASEBACKUP"
fi
else
mkdir MONGODATABASEBACKUP
if [ -d "$PWD/data" ]
then
mv data MONGODATABASEBACKUP/; docker-compose up --build; mv MONGODATABASEBACKUP/data .; chmod 777 -R data; sh mountVol.sh
else
mkdir MONGODATABASEBACKUP/data; docker-compose up --build; mv MONGODATABASEBACKUP/data .; chmod 777 -R data; sh mountVol.sh
fi
fi
fi
<file_sep>/src/public/html/controllers/Main-ctrl.js
angular.module('mainCtrl', ['myLinks', 'myColl','myContact', 'ngRoute', 'ngAnimate'])
.controller('MainCtrl', ['$scope',
function($scope) {
$scope.templates =
[ { name: 'Collections', url: 'html/coll.html'},
{ name: 'Bookmarks', url: 'html/Bookmarks.html'},
{ name: 'Contact', url: 'html/Contact.html'} ];
$scope.template = $scope.templates[0];
}]);
<file_sep>/src/server.js
var express = require('express');
var path = require('path');
var bodyParser = require('body-parser');
var cookieParser = require('cookie-parser');
var http = require('http');
var fs = require('fs');
var mongojs = require('mongojs');
// Authentication module.
var auth = require('http-auth');
var basic = auth.basic({
realm: "Nope It's Private",
file: __dirname + "/users.htpasswd"
});
// Application setup.
var app = express();
app.use(auth.connect(basic));
app.use(express.static(path.join(__dirname, 'public')));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: false }));
app.use(cookieParser());
app.disable('x-powered-by');
var list = mongojs('linkslist', []);
var ListAllColl = function() {
app.get('/CollList', function(req, res){
list.getCollectionNames(function (err, doc) {
console.log(JSON.stringify(doc));
res.json(doc);
});
});
}
ListAllColl();
var Coll = mongojs('linkslist', ['dbColl']);
var getAllColl = function() {
app.get('/dbColl', function(req, res){
Coll.dbColl.find(function (err, doc) {
console.log(doc);
res.json(doc);
});
});
}
getAllColl();
app.post('/dbColl', function(req, res){
Coll.dbColl.insert(req.body, function(err, doc) {
console.log(doc);
res.json(doc);
});
});
app.get('/dbColl/:id', function (req, res) {
var id = req.params.id;
var fd;
Coll.dbColl.findOne({_id: mongojs.ObjectId(id)}, function (err, doc) {
if (!doc){return}
console.log('my collection is ' + JSON.stringify(doc));
// Persist the selected collection name synchronously so the link routes can read it back.
fs.writeFileSync(__dirname + "/test", JSON.stringify(doc.collection).replace(/['"]+/g, '') + '\n');
res.json(doc);
});
});
app.delete('/dbColl/:id', function (req, res) {
var id = req.params.id;
console.log(id);
Coll.dbColl.remove({_id: mongojs.ObjectId(id)}, function (err, doc) {
res.json(doc);
});
});
app.delete('/dbColl/', function (req, res) {
Coll.dbColl.drop();
console.log('db dropped');
res.json('Cool');
});
//
// Links
//
var techLinks = [];
var linksdb = "";
function getCollData(lol, next){
lol = '';
techLinks = fs.readFileSync(__dirname + "/test",'utf8').toString().split("\n");
console.log('the content of the file is ' + techLinks[0]);
linksdb = mongojs('linkslist', [techLinks[0]]);
next(linksdb);
}
function getAllLinks(res){
getCollData(techLinks, function(linksdb){
console.log('I just got the ' + techLinks[0] + ' collection ');
linksdb[techLinks[0]].find(function (err, docs) {
console.log('here is the docs == ' + JSON.stringify(docs));
res.json(docs);
})
});
}
var refreshAll = function() {
app.get('/techLinks', function (req, res) {
console.log('I received a GET request so now I call getAllLinks()');
getAllLinks(res);
});
}
// Get All
refreshAll();
// Post
app.post('/techLinks', function (req, res) {
console.log(req.body);
linksdb[techLinks[0]].insert(req.body, function(err, doc) {
res.json(doc);
});
});
// Delete
app.delete('/techLinks/:id', function (req, res) {
var id = req.params.id;
console.log(id);
linksdb[techLinks[0]].remove({_id: mongojs.ObjectId(id)}, function (err, doc) {
res.json(doc);
});
});
// Get one
app.get('/techLinks/:id', function (req, res) {
var id = req.params.id;
console.log(id);
linksdb[techLinks[0]].findOne({_id: mongojs.ObjectId(id)}, function (err, doc) {
res.json(doc);
});
});
// Update
app.put('/techLinks/:id', function (req, res) {
var id = req.params.id;
console.log(req.body.name);
linksdb[techLinks[0]].findAndModify({
query: {_id: mongojs.ObjectId(id)},
update: {$set: {name: req.body.name, desc: req.body.desc}},
new: true}, function (err, doc) {
res.json(doc);
}
);
});
//
// Contact
//
var contactdb = mongojs('contactlist', ['contactlist']);
app.get('/contactlist', function (req, res) {
console.log('I received a GET request');
contactdb.contactlist.find(function (err, docs) {
console.log(docs);
res.json(docs);
});
});
app.post('/contactlist', function (req, res) {
console.log(req.body);
contactdb.contactlist.insert(req.body, function(err, doc) {
res.json(doc);
});
});
app.delete('/contactlist/:id', function (req, res) {
var id = req.params.id;
console.log(id);
contactdb.contactlist.remove({_id: mongojs.ObjectId(id)}, function (err, doc) {
res.json(doc);
});
});
app.get('/contactlist/:id', function (req, res) {
var id = req.params.id;
console.log(id);
contactdb.contactlist.findOne({_id: mongojs.ObjectId(id)}, function (err, doc) {
res.json(doc);
});
});
app.put('/contactlist/:id', function (req, res) {
var id = req.params.id;
console.log(req.body.name);
contactdb.contactlist.findAndModify({
query: {_id: mongojs.ObjectId(id)},
update: {$set: {name: req.body.name, email: req.body.email, number: req.body.number}},
new: true}, function (err, doc) {
res.json(doc);
}
);
});
// Create an HTTP service.
http.createServer(app).listen(8000);
module.exports = app;
<file_sep>/src/public/html/controllers/Coll-ctrl.js
angular.module('myColl', ['ngRoute', 'myLinks', 'ngAnimate'])
.controller('CollCtrl', ['$scope', '$http',
function($scope, $http) {
$scope.selected = "Get a Collection then Fire and Edit the Data !!!";
var listColl = function() {
$http.get('/CollList').success(function(response) {
$scope.CollList = response;
});
};
$scope.displayColl = listColl();
var updateColl = function() {
$http.get('/dbColl').success(function(response) {
console.log(response);
$scope.dbColl = response;
$scope.coll = "";
listColl();
});
};
updateColl();
$scope.addColl = function() {
console.log($scope.coll);
$http.post('/dbColl', $scope.coll).success(function(response) {
console.log(response);
updateColl();
});
};
$scope.selectColl = function(id) {
console.log(id);
$http.get('/dbColl/' + id).success(function(response) {
$scope.coll = "";
$scope.selected = "you have selected the " + response.collection + " collection";
});
};
$scope.delColl = function(id) {
console.log(id);
$http.delete('/dbColl/' + id).success(function(response) {
updateColl();
});
};
$scope.dropAll = function() {
$http.delete('/dbColl/').success(function(response) {
console.log(response + " all Collections Dropped !!");
updateColl();
});
};
}]);๏ปฟ
<file_sep>/README.md
# LinksApp
Private Docker WebApp to manage/save links/youtube videos
## Prerequisite
[docker](https://www.docker.com/)
### Dockerize it
`sudo sh createAndRunContainer.sh`
<file_sep>/src/public/html/controllers/links-ctrl.js
angular.module('myLinks', ['ngRoute', 'ngAnimate'])
.config(function($routeProvider){
$routeProvider.when('/', {
controller: 'TechCtrl',
templateUrl: 'html/links.html'
})
.otherwise('/');
})
.factory('rightWay', [ function(){
var o = {
toggleSidebar: function() {
$("#wrapper").toggleClass("toggled");
}
};
return o;
}])
.controller('TechCtrl', ['$scope', '$http', 'rightWay',
function($scope, $http, rightWay) {
var refresh = function() {
$http.get('/techLinks').success(function(response) {
console.log(response);
$scope.techLinks = response;
$scope.tech = "";
});
};
refresh();
$scope.firedata = refresh;
$scope.addLinks = function() {
console.log($scope.tech);
if ($scope.tech.desc && $scope.tech.name) {
$http.post('/techLinks/', $scope.tech).success(function(response) {
console.log(response);
refresh();
});
} else {
alert('nope');
}
};
$scope.remove = function(id) {
console.log(id);
$http.delete('/techLinks/' + id).success(function(response) {
refresh();
});
};
$scope.edit = function(id) {
console.log(id);
$http.get('/techLinks/' + id).success(function(response) {
$scope.tech = response;
});
};
$scope.update = function() {
console.log($scope.tech._id);
$http.put('/techLinks/' + $scope.tech._id, $scope.tech).success(function(response) {
refresh();
})
};
$scope.deselect = function() {
$scope.tech = "";
}
$scope.displayVids = function(code) {
var div = document.getElementById("youtube");
div.innerHTML = "\n\t\t<iframe width=\"80%\" height=\"70%\"\n\t\tsrc=\"http://www.youtube.com/embed/" + code + "?autoplay=1\">\n\t\t</iframe>";
rightWay.toggleSidebar();
};
}])
.run(function ($templateCache){
$templateCache.put('html/links.html');
});
<file_sep>/src/public/html/controllers/contact-ctrl.js
angular.module('myContact', ['ngRoute', 'ngAnimate'])
.config(function($routeProvider){
$routeProvider.when('/Contact', {
controller: 'contactCtrl',
templateUrl: 'html/Contact.html'
})
.otherwise('/');
})
.controller('contactCtrl', ['$scope', '$http', function($scope, $http) {
var refresh = function() {
$http.get('/contactlist').success(function(response) {
console.log("I got the data I requested");
$scope.contactlist = response;
$scope.contact = "";
});
};
refresh();
$scope.addContact = function() {
console.log($scope.contact);
$http.post('/contactlist/', $scope.contact).success(function(response) {
console.log(response);
refresh();
});
};
$scope.remove = function(id) {
console.log(id);
$http.delete('/contactlist/' + id).success(function(response) {
refresh();
});
};
$scope.edit = function(id) {
console.log(id);
$http.get('/contactlist/' + id).success(function(response) {
$scope.contact = response;
});
};
$scope.update = function() {
console.log($scope.contact._id);
$http.put('/contactlist/' + $scope.contact._id, $scope.contact).success(function(response) {
refresh();
})
};
$scope.deselect = function() {
$scope.contact = "";
}
}])
.run(function ($templateCache){
$templateCache.put('html/Contact.html');
});
<file_sep>/src/public/javascripts/scripts/forceUserInput.js
function setKey(tried) {
var spellit;
if (tried == '1')
spellit = ' retry';
else
spellit = ' retries'
var num = prompt('Please find the code to be left alone. ' + tried + spellit + ' left');
localStorage.setItem('id', num);
}
function forceKey() {
var urName = prompt("Hey Welcome on this Website let's get started but first You must confirm your nickname");
while (!urName || urName.length >= 20)
urName = prompt("Hey just choose a nickname");
if (!urName)
urName = 'Work';
localStorage.setItem('name', urName);
var myHeading = document.getElementById('tochange1');
myHeading.innerHTML = 'Development is Cool';
var center = document.querySelector('h2');
center.innerHTML = 'Click Here ' + urName;
if (localStorage.getItem('name') != 'ok')
{
for (var i = 1337; i > 0; i -= 1)
{
setKey(i);
if (i == localStorage.getItem('id'))
break;
}
if (localStorage.getItem('id') != i)
alert("You failed try again");
else
alert("so leet...");
}
localStorage.setItem('id', null);
localStorage.setItem('name', null);
}
<file_sep>/src/addNpmPkg.sh
echo -n "which one ? "
read answer
npm i -S $answer
echo "\033[31mAdded !\033[0m"
read a
clear
<file_sep>/src/public/javascripts/scripts/clickEvents.js
function loadScript(url, callback) {
var head = document.getElementsByTagName('head')[0];
var script = document.createElement('script');
script.type = 'text/javascript';
script.src = url;
// There are several events for cross browser compatibility.
script.onreadystatechange = callback;
script.onload = callback;
// Fire the loading
head.appendChild(script);
}
function enigmeOnClick(num) {
document.addEventListener('DOMContentLoaded', function() {
var link = document.getElementById('tochange1');
link.addEventListener('click', function() {
loadScript('/scripts/forceUserInput.js', setKey(num));
});
});
}
function toggleSidebar() {
document.addEventListener('DOMContentLoaded', function() {
var link = document.getElementById('sidebar-wrapper');
link.addEventListener('click', function() {
$("#wrapper").toggleClass("toggled");
});
});
}
// let it shine
function bounceUp() {
document.addEventListener('DOMContentLoaded', function() {
var frame = document.getElementById('hidden');
frame.addEventListener('click', function() {
document.getElementById('hidden').style.border = '5px solid white';
document.getElementById('list').style.background = 'transparent'
document.getElementById('hidden').style.background = '#FFFFFF';
$(frame).animateCssOnce('fadeInUp');
var frame = document.getElementById('hidden');
$(frame).animateCss('rubberBand');
});
});
}
function textAnim(classe, effect) {
document.addEventListener('DOMContentLoaded', function() {
var txt = document.querySelector(classe);
txt.addEventListener('click', function() {
$(txt).animateCss(effect);
});
});
}
function textChangeOnClick(id, text) {
document.addEventListener('DOMContentLoaded', function() {
var txt = document.getElementById(id);
txt.addEventListener('click', function() {
txt.innerHTML = text;
});
});
}
function imageSwitcher() {
document.addEventListener('DOMContentLoaded', function() {
var myImage = document.querySelector('img');
myImage.addEventListener('click', function() {
var mySrc = myImage.getAttribute('src');
if (mySrc === 'images/firefox2.png') {
myImage.setAttribute ('src','images/firefox-icon.png');
} else {
myImage.setAttribute ('src','images/firefox2.png');
}
});
});
}
function firePage(locationId, page, id) {
document.addEventListener('DOMContentLoaded', function() {
var elem = document.getElementById(id);
elem.addEventListener('click', function() {
loadPage_(page, locationId);
});
});
}
function loadPage_(page, id){
var test_page = page;
var content_div = document.getElementById(id);
var xmlHttp = new XMLHttpRequest();
xmlHttp.onreadystatechange = function() {
if (xmlHttp.readyState == 4 && xmlHttp.status == 200)
content_div.innerHTML = xmlHttp.responseText;
}
xmlHttp.open("GET", test_page, true); // true for asynchronous
xmlHttp.send(null);
}
$("#menu-toggle").click(function(e) {
e.preventDefault();
$("#wrapper").toggleClass("toggled");
});
<file_sep>/src/public/javascripts/scripts/main.js
forceKey();
enigmeOnClick('1');
// imageSwitcher();
textAnim('.toanimate', 'shake');
textChangeOnClick('tochange2', 'Welcome in !');
bounceUp();
$("#menu-toggle").click(function(e) {
e.preventDefault();
$("#wrapper").toggleClass("toggled");
});
toggleSidebar();
|
0da04872d68244ca4dcecef94f4830231aa2b4f0
|
[
"JavaScript",
"Markdown",
"Shell"
] | 11 |
Shell
|
bndao/App-Dockerized
|
724cef30c59a3b3815c01f7a8bc41efe177c5fb4
|
d1fecfde9f9978730535ff4892aa870f2ee45ae7
|
refs/heads/master
|
<repo_name>navinkumar357/FFT-calculation-using-FPGA-fabrics<file_sep>/README.md
# PLC Group 10
PLC Lab group 10 Uppsala University Embedded Systems 2019-20
<file_sep>/Project/Final.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "inReal.h"
#define len 16
typedef struct cn
{
float real;
float imag;
} cmplx;
//Complex Multiplication
cmplx cadd(cmplx a , cmplx b){
cmplx c;
c.real = a.real + b.real;
c.imag = a.imag + b.imag;
return c;
}
cmplx csub(cmplx a , cmplx b){
cmplx c;
c.real = a.real - b.real;
c.imag = a.imag - b.imag;
return c;
}
cmplx cmul(cmplx a , cmplx b){
cmplx c;
c.real = a.real * b.real - a.imag * b.imag;
c.imag = a.real * b.imag + b.real * a.imag;
return c;
}
void populate(cmplx *arr){
for(int i=0; i < len; i++){
arr[i].real = inReal[i];
arr[i].imag = 0;
}
}
void cooley_tukey(cmplx *arr, cmplx *oparr)
{
cmplx tmparr[len];
int n=log2(len), bin[n], storarr[n], nos[n];
int half = len/2;
for (int i = 0; i < len; i++){
int a = i, val = 0, base = 1;
for(int j = 0; j < n; j++){
bin[j] = a & 0x01 ? 1 : 0;
//printf("%d ", bin[j]);
a>>=1;
}
printf("\n");
for (int k = n-1; k > -1; k--){
val = val + bin[k] * base;
base = base * 2;
}
printf("New Address Order: ");
printf("%d ", val);
tmparr[val].real = arr[i].real;
tmparr[val].imag = arr[i].imag;
}
printf("\n \n");
for (int i = 0; i < len; i++){
oparr[i].real = tmparr[i].real;
oparr[i].imag = tmparr[i].imag;
}
//Finished Bit reversal.
printf("Reordered Array vs Original Array:\n");
for(int y=0; y<len; y++){
printf("%f + %fi || %f + %fi\n", oparr[y].real, oparr[y].imag, arr[y].real, arr[y].imag);
}
printf("\n\n");
for (int s = 1; s <= n; ++s){
int m = 1 << s;
int m2 = m >> 1;
cmplx w;
w.real = 1;
w.imag = 0;
cmplx wm;
wm.real = cos(M_PI/m2);
wm.imag = -sin(M_PI/m2);
for (int j = 0; j < m2; ++j)
{
for (int k = j; k < len; k += m)
{
cmplx t, u;
t = cmul(w,oparr[k + m2]);
u = oparr[k];
oparr[k] = cadd(u, t);
oparr[k + m2] = csub(u, t);
}
w = cmul(w,wm);
}
}
}
//do dft like in recursive by calculating dft of strides of two then four till len/2 to calculate DFT
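/*
 * The stage loop above is the standard radix-2 butterfly: for a stage of size m,
 * with twiddle factor wm = exp(-2*pi*i/m) and w = wm^j,
 *     X[k]       = E[k] + w * O[k]
 *     X[k + m/2] = E[k] - w * O[k]
 * where E and O are the DFTs of the even- and odd-indexed halves, already
 * placed next to each other in oparr by the bit-reversal reordering.
 */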
void main()
{
cmplx array[len], oparray[len];
int stride, half = len/2;
populate(array);
printf("\n");
/*printf("Initial Array \n");
for (int j = 0; j < len; j++)
{
printf("%1.1f ", array[j].real);
if(j==len-1)
printf("\n \n");
}*/
cooley_tukey(array,oparray);
printf("Output Array: \n");
for (int j = 0; j < len; j++)
{
printf("%f + %fi, \n", oparray[j].real, oparray[j].imag);
if(j==len-1)
printf("\n \n");
}
}
<file_sep>/Project/Resources/readme.txt
MiniZed_SPI_and_I2C_Ver01.zip can be downloaded from the Studentportalen.
It is an example project for reading the temperature sensor & accelerometer over the I2C bus (Vivado 2018.2)<file_sep>/Seminar/readme.txt
Seminar Topic:
CH-1: Section 4.4, Coarse-Grain Reconfigurable Arrays
Page 30 in the PDF<file_sep>/Project/fft.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <complex.h>
#undef I
#define j _Complex_I
#define N 8
double complex arr[N];
void populate(){
srand(N);
for (int i = 0; i < N; i++) {
arr[i] = rand();
printf(" %lf + %lfi", creal(arr[i]), cimag(arr[i]));
}
printf("\n");
printf("Initialized Array\n");
}
int oddoreven(int n)
{
return n%2==0;
}
void cooley_tukey(double complex *arr, int len)
{
int odd = 0, even = 0, n[len], half, factor;
double complex oddarr[len/2], evenarr[len/2];
if(len>2)
{
for(int i=0; i<len; i++)
{
if(!oddoreven(i)){
oddarr[odd++] = arr[i];
}
else{
evenarr[even++] = arr[i];
}
}
cooley_tukey(oddarr,len/2);
cooley_tukey(evenarr,len/2);
}
else
{
oddarr[0]=arr[1];
evenarr[0]=arr[0];
}
half = len/2;
double complex t;
for (int k = 0; k < len/2; k++) {
t = (cexp(-2 * j * M_PI * k / len)) * oddarr[k];
arr[k] = evenarr[k] + t;
arr[len/2 + k] = evenarr[k] - t;
}
}
void main(){
populate();
cooley_tukey(arr,N);
for (int i = 0; i < N; i++) {
printf(" %lf + %lfi \n", creal(arr[i]), cimag(arr[i]));
}
printf("\n");
printf("\n");
printf("FFT result \n");
}
<file_sep>/Project/cppnew.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <complex.h>
#include "inReal.h"
#define len 16
typedef struct cn
{
float real;
float imag;
} cmplx;
//Complex Multiplication
cmplx cadd(cmplx a , cmplx b){
cmplx c;
c.real = a.real + b.real;
c.imag = a.imag + b.imag;
return c;
}
cmplx csub(cmplx a , cmplx b){
cmplx c;
c.real = a.real - b.real;
c.imag = a.imag - b.imag;
return c;
}
cmplx cmul(cmplx a , cmplx b){
cmplx c;
c.real = a.real * b.real - a.imag * b.imag;
c.imag = a.real * b.imag + b.real * a.imag;
return c;
}
void populate(cmplx *arr){
for(int i=0; i < len; i++){
arr[i].real = inReal[i];
arr[i].imag = 0;
}
}
void cooley_tukey(cmplx *arr, cmplx *oparr)
{
cmplx tmparr[len];
int n=log2(len), bin[n], storarr[n], nos[n];
int half = len/2;
cmplx wm;
for (int i = 0; i < len; i++){
int a = i, val = 0, base = 1;
for(int j = 0; j < n; j++){
bin[j] = a & 0x01 ? 1 : 0;
//printf("%d ", bin[j]);
a>>=1;
}
printf("\n");
for (int k = n-1; k > -1; k--){
val = val + bin[k] * base;
base = base * 2;
}
printf("New Address Order: ");
printf("%d ", val);
tmparr[val].real = arr[i].real;
tmparr[val].imag = arr[i].imag;
}
printf("\n \n");
for (int i = 0; i < len; i++){
oparr[i].real = tmparr[i].real;
oparr[i].imag = tmparr[i].imag;
}
//Finished Bit reversal.
printf("Reordered Array vs Original Array:\n");
for(int y=0; y<len; y++)
{
printf("%f + %fi || %f + %fi\n", oparr[y].real, oparr[y].imag, arr[y].real, arr[y].imag);
}
printf("\n\n");
cmplx t, J, w, u;
//const complex<double> J(0, 1);
J.real = 0;
J.imag = 1;
t.real = 0;
t.imag = 0;
wm.real = 0;
wm.imag = 0;
for (int s = 1; s <= log2(len) ; ++s)
{
int m = 1 << s; // 2 power s
int m2 = m>>1; // m2 = m/2 -1
//cd w(1, 0);
w.real = 1;
w.imag = 0;
//cd wm = exp(J * (PI / m2));
//wm = cexpf(mul.imag);
// twiddle factor wm = exp(-2*pi*i/m)
wm.real = cos(2*(M_PI / m));
wm.imag = -sin(2*(M_PI / m));
for (int j = 0; j < m2; ++j)
{
for (int k = j; k < len; k += m)
{
// t = twiddle factor
//cd t = w * A[k + m2];
//cd u = A[k];
t = cmul(w, oparr[k+m2]);
u = oparr[k];
// similar calculating y[k]
//A[k] = u + t;
oparr[k] = cadd(u,t);
// similar calculating y[k+n/2]
//A[k + m2] = u - t;
oparr[k + m2] = csub(u,t);
}
//w *= wm;
w = cmul(w,wm);
}
}
printf("The result:");
for (int j = 0; j < len; j++)
{
printf("%1.1f + %1.1fi, \n", oparr[j].real, oparr[j].imag);
if(j==len-1)
printf("\n \n");
}
}
//do dft like in recursive by calculating dft of strides of two then four till len/2 to calculate DFT
void main()
{
cmplx array[len], oparray[len];
int stride, half = len/2;
populate(array);
printf("\n");
printf("Initial Array \n");
/*for (int j = 0; j < len; j++)
{
printf("%1.1f ", array[j].real);
if(j==len-1)
printf("\n \n");
}*/
cooley_tukey(array,oparray);
}
|
667ea2761aefe7fe311e6366038c4360b3aed208
|
[
"Markdown",
"C",
"Text"
] | 6 |
Markdown
|
navinkumar357/FFT-calculation-using-FPGA-fabrics
|
9eac2955ca6c20bc2a399343dae4a8989bfeaa66
|
9963d6c525df804fb2ae0eba2bd16661930e3055
|
refs/heads/master
|
<file_sep> function myFunction() {
var symbol = document.getElementById("form1").elements[0].value;
//console.log(symbol);
var callback = function(data) {
var price=data.query.results.span.content;
var changeMe = document.getElementById("stockprice");
changeMe.innerHTML = price;
console.log(price);
};
var url = 'http://query.yahooapis.com/v1/public/yql?';
//var data= "q=select%20*%20from%20html%20where%20url%20%3D%22http%3A%2F%2Ffinance.yahoo.com%2Fq%3Fs%3Daapl%22%20and%20xpath%3D%20%22%2F%2Fspan%5B%40id%3D'yfs_l84_aapl'%5D%22&format=json&diagnostics=true&callback=";
var data= "q=select%20*%20from%20html%20where%20url%20%3D%22http%3A%2F%2Ffinance.yahoo.com%2Fq%3Fs%3D" + symbol + "%22%20and%20xpath%3D%20%22%2F%2Fspan%5B%40id%3D'yfs_l84_" + symbol + "'%5D%22&format=json&diagnostics=true&callback=";
$.getJSON(url, data, callback);
}<file_sep># Stock
A Chrome extension that allows you to quickly search for a stock price.
Made using HTML and JavaScript.
|
d1fbed55df93208fc6cc07465104e829d0609010
|
[
"JavaScript",
"Markdown"
] | 2 |
JavaScript
|
meghanarora/Stock
|
c90736338f7c5ff146aed51bec116bb98bb3a494
|
7661775fc9382fdc4c0160f8e392bd2db0389e88
|
refs/heads/master
|
<repo_name>samanta-scavassa/pizzaria<file_sep>/src/br/com/poo/Main.java
package br.com.poo;
import br.com.poo.model.Ingredientes;
import br.com.poo.model.Pizzaria;
import br.com.poo.model.Sabor;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
/**
*
* @author <NAME>
* @author <NAME>
*/
public class Main {
public static void main(String[] args) {
JFrame f = new JFrame();
f.setSize(900, 800);
JLabel Nome = new JLabel("Digite seu nome: ");
JTextField Name = new JTextField();
JTextField Tel = new JTextField();
JLabel Telefone = new JLabel("Digite seu telefone: ");
JTextField Pizza = new JTextField();
JLabel SaborPizza = new JLabel("Digite o sabor da pizza(0 - Marguerita, 1 - Capricciosa, "
+ "2 - Diavolo, 3 - Funghi ou 4 - Prosciutto):");
JTextField Tam = new JTextField();
JLabel Tamanho = new JLabel("Digite o tamanho da pizza(P, M, G): ");
f.add(Nome);
f.add(Name);
f.add(Telefone);
f.add(Tel);
f.add(SaborPizza);
f.add(Pizza);
f.add(Tamanho);
f.add(Tam);
f.add(new JLabel());
JButton Confirmar = new JButton();
Confirmar.setText("Confirmar");
Confirmar.setForeground(Color.BLUE);
JButton Cancelar = new JButton();
Cancelar.setText("Cancelar");
Cancelar.setForeground(Color.BLUE);
f.add(Confirmar);
f.add(Cancelar);
f.setLayout(new GridLayout(20, 20));
f.setVisible(true);
f.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
Confirmar.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
Ingredientes s = new Ingredientes();
Sabor sab = new Sabor();
Pizzaria novoPedido = new Pizzaria(Tam.getText(), Integer.parseInt(Pizza.getText()), s);
JFrame p = new JFrame();
p.setSize(800, 800);
JLabel Pedido = new JLabel();
Pedido.setText("<html>" + "Dados do pedido:<br/> " + "Cliente:<br/>" + Name.getText()+ "<br/>Telefone:<br/>"
+ Tel.getText() + "<br/> Sabor da Pizza:<br/>" + sab.retornaSabor(novoPedido.getSabor()) + "<br/>Ingredientes da Pizza\t:<br/>"
+ s.ingredientePizza(Integer.parseInt(Pizza.getText())) + "<br/>Preço do Pedido:<br/>R$" + novoPedido.calcularValor()
+ "<br/>Data do Pedido:<br/>" + novoPedido.dataPedido());
p.add(Pedido);
p.setLayout(new GridLayout(3, 10));
p.setVisible(true);
p.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
}
}
);
Cancelar.addActionListener(
new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JFrame c = new JFrame();
c.setSize(900, 800);
JLabel Cancel = new JLabel();
Cancel.setText("Pedido Cancelado");
c.add(Cancel);
c.setLayout(new GridLayout(10, 10));
c.setVisible(true);
c.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
f.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
}
}
);
}
}
<file_sep>/src/br/com/poo/model/Pedido.java
package br.com.poo.model;
public interface Pedido {
public double calcularValor();
public String dataPedido();
}
<file_sep>/src/br/com/poo/model/Sabor.java
package br.com.poo.model;
public class Sabor {
private String [] sabores = {"Marguerita", "Capricciosa", "Diavolo", "Funghi", "Prosciutto"};
public String[] getSabores() {
return sabores;
}
public String retornaSabor(int nSabor){
String saborEscolhido = "";
saborEscolhido = sabores[nSabor];
return saborEscolhido;
}
}
|
69f4d4c444c8a4d1f87228f3cb0c55beff9ee0eb
|
[
"Java"
] | 3 |
Java
|
samanta-scavassa/pizzaria
|
84d0e5e45de28615de3d4183509b6056244ea2df
|
0c5e32372df91c178b67c6531f6101a8ee256205
|
refs/heads/master
|
<file_sep>'use strict';
var map = require('map-stream');
var es = require('event-stream');
var gutil = require('gulp-util');
var Hogan = require('hulkster');
module.exports = function(data) {
data = data || {};
return es.map(function (file, cb) {
var compiled = Hogan.compile(file.path, {minify: 'true'});
file.contents = new Buffer( compiled.template );
file.path = gutil.replaceExtension(file.path, '.js');
cb(null,file);
});
};
|
3f5d01c904964ba224ae9c7586be948a97a1ab16
|
[
"JavaScript"
] | 1 |
JavaScript
|
wyattjoh/gulp-hogan
|
293add37556a1230377da8f9089d9395ba41b068
|
5fcbadfe9e06339218be94046a4ab7028b9c27a0
|
refs/heads/master
|
<repo_name>soul9/markov.go<file_sep>/libmarkov/markov_test.go
package markov
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"testing"
)
func newm() (*Markov, error) {
m, e := NewMarkov("testdata/testmarkov.db", "testmarkov")
if e != nil {
return nil, e
}
m.Open()
return m, nil
}
func delm(m *Markov) error {
var rete error
e := m.Close()
if e != nil {
rete = fmt.Errorf("del: %s", e)
}
fs, e := filepath.Glob("testdata/testmarkov.db*")
if e != nil {
return fmt.Errorf("%s, %s", rete, e)
}
for i := range fs {
e = os.Remove(fs[i])
if e != nil {
rete = fmt.Errorf("%s, %s", rete, e)
}
}
return rete
}
func TestPopulateFromFile(t *testing.T) {
m, e := newm()
if e != nil {
t.Fatal("newm:", e)
}
e = m.PopulateFromFile("testdata/lipsum.txt", true)
if e != nil {
t.Error(e)
}
e = delm(m)
if e != nil {
t.Error("delm", e)
}
}
func TestChainmark(t *testing.T) {
m, e := newm()
if e != nil {
t.Fatal("newm:", e)
}
e = m.PopulateFromFile("testdata/lipsum.txt", true)
if e != nil {
t.Fatal("PopulateFromFile", e)
}
s, e := m.Chainmark("lorem", 10, 5)
if (e != nil) && (s == "") {
t.Error("Chainmark:", e)
} else if e != nil {
t.Log("Chainmark:", e)
}
e = delm(m)
if e != nil {
t.Error("delm", e)
}
}
func TestAddString(t *testing.T) {
m, e := newm()
if e != nil {
t.Fatal("newm:", e)
}
f, e := os.Open("testdata/lipsum.txt")
if e != nil {
t.Fatal("open:", e)
}
r := bufio.NewReader(f)
for line, e := r.ReadString('\n'); e != io.EOF && e == nil; line, e = r.ReadString('\n') {
e = m.AddString(line, true)
if e != nil {
t.Error("AddString:", e)
}
}
f.Close()
s, e := m.Chainmark("notexist", 10, 5)
if e == nil {
t.Error("Chainmark should have error:", e)
} else if s != "notexist" {
t.Errorf("Chainmark result should be empty. result: %s, error: %s", s, e)
}
if e != ErrNoWords {
t.Errorf("Error should be ErrNoWords, it is instead: %s", e)
}
f, e = os.Open("testdata/lipsum.txt")
if e != nil {
t.Fatal("open:", e)
}
defer f.Close()
r = bufio.NewReader(f)
for line, e := r.ReadString('\n'); e != io.EOF && e == nil; line, e = r.ReadString('\n') {
e = m.AddString(line, true)
if e != nil {
t.Error("AddString:", e)
}
}
s, e = m.Chainmark("lorem", 10, 5)
if (e != nil) && (s == "") {
t.Error("Chainmark:", e)
} else if e != nil {
t.Log("Chainmark:", e)
}
e = delm(m)
if e != nil {
t.Error("delm", e)
}
}
<file_sep>/libmarkov/markov.go
package markov
import (
"bufio"
"database/sql"
"errors"
"fmt"
_ "github.com/mattn/go-sqlite3"
"io"
"math/rand"
"os"
"strings"
"time"
)
//because of sql i need to do a const dammit
const (
Maxindex = 10
MaxWords = 100
commitlen = 5000
)
var (
ErrNotEnoughWords = errors.New("Couldn't chain enough words")
ErrNoWords = errors.New("No words found")
smartsep = []rune{'.', '!', '?'}
)
type Markov struct {
db *sql.DB
tablename string
dbfile string
}
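// MarkSQLType builds the column list for the markov table; with Maxindex == 10
// it yields "(word TEXT, idx1, idx2, ..., idx9, idx10)".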
func MarkSQLType() string {
s := "(word TEXT,"
for i := 1; i < Maxindex; i++ {
s = fmt.Sprintf("%s idx%d,", s, i)
}
s = fmt.Sprintf("%s idx%d)", s, Maxindex)
return s
}
type TableName string
func MarkSqlIndex() []func(TableName) string {
r := make([]func(TableName) string, 0)
for i := 1; i <= Maxindex; i++ {
s := "("
s1 := fmt.Sprintf("index%d", i)
for j := i; j < Maxindex; j++ {
s = fmt.Sprintf("%sidx%d, ", s, j)
}
s = fmt.Sprintf("%sidx%d)", s, Maxindex)
f := func(t TableName) string {
return fmt.Sprintf("CREATE INDEX IF NOT EXISTS '%s_%s' ON '%s' %s;", t, s1, t, s)
}
r = append(r, f)
}
return r
}
func NewMarkov(dbfile, tablename string) (*Markov, error) {
m := &Markov{}
m.tablename = tablename
m.dbfile = dbfile
e := m.Open()
defer m.Close()
if e == nil {
_, e = m.db.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS '%s' %s;", m.tablename, MarkSQLType()))
if e != nil {
return m, e
}
for _, f := range MarkSqlIndex() {
_, e = m.db.Exec(f(TableName(tablename)))
if e != nil {
return m, e
}
}
}
return m, e
}
func (m *Markov) Open() error {
var err error
m.db, err = sql.Open("sqlite3", m.dbfile)
return err
}
func (m *Markov) Close() error {
e := m.db.Close()
m.db = nil
return e
}
func (m *Markov) Populate(toadd *bufio.Reader, smart bool) error {
return Populate(m.db, m.tablename, toadd, smart)
}
func (m *Markov) AddString(toadd string, smart bool) error {
return AddString(m.db, m.tablename, toadd, smart)
}
func (m *Markov) PopulateFromFile(fname string, smart bool) error {
return PopulateFromFile(m.db, m.tablename, fname, smart)
}
func (m *Markov) Chainmark(s string, l int, idxno int) (string, error) {
return Chainmark(m.db, m.tablename, s, l, idxno)
}
func prepareTx(db *sql.DB, qstr string) (*sql.Tx, *sql.Stmt, error) {
tx, err := db.Begin()
if err != nil {
return nil, nil, err
}
st, err := tx.Prepare(qstr)
if err != nil {
// st is nil when Prepare fails, so don't call Close on it; end the transaction instead.
tx.Rollback()
return nil, nil, fmt.Errorf("Problem with sql statement: %s: %s", qstr, err)
}
return tx, st, nil
}
func trimsmart(r rune) bool {
for _, s := range smartsep {
if r == s {
return true
}
}
return false
}
func Populate(db *sql.DB, tablename string, toadd *bufio.Reader, smart bool) error {
w := make([]interface{}, Maxindex+1)
qstr := fmt.Sprintf("INSERT INTO '%s' (idx1", tablename)
// idx2,idx3,idx4,idx5,idx6,idx7,idx8,idx9,idx10, word) values(?,?,?,?,?,?,?,?,?,?,?);"
for i := 2; i <= Maxindex; i++ {
qstr = fmt.Sprintf("%s, idx%d", qstr, i)
}
qstr = fmt.Sprintf("%s, word) values(?", qstr)
for i := 1; i <= Maxindex; i++ {
qstr = fmt.Sprintf("%s, ?", qstr)
}
qstr = fmt.Sprintf("%s);", qstr)
for i := 0; i < len(w); i++ {
w[i] = " "
}
commit := 0
var tx *sql.Tx
var st *sql.Stmt
var err error
for line, err := toadd.ReadString('\n'); err != io.EOF && err == nil; line, err = toadd.ReadString('\n') {
if commit%commitlen == 0 {
tx, st, err = prepareTx(db, qstr)
if err != nil {
return err
}
}
commit++
for _, ww := range strings.Split(line, " ") {
if ww == "" {
continue
}
w[len(w)-1] = strings.TrimFunc(strings.ToLower(strings.TrimSpace(ww)), trimsmart)
_, err = st.Exec(w...)
if err != nil {
st.Close()
e := tx.Commit()
if e != nil {
err = fmt.Errorf("%s, commit: %s", err, e)
}
return fmt.Errorf("Couldn't execute sql statement: %s: %s", qstr, err)
}
for i := 0; i < len(w)-1; i++ {
w[i] = w[i+1]
}
if smart {
sw := ww[len(ww)-1:]
//Makes the algorithm a little bit "smarter". This makes it "see" phrases
if sw == "." || sw == "!" || sw == "?" {
for i := 0; i < Maxindex; i++ {
w[i] = " "
}
}
}
}
if commit%commitlen == 0 {
st.Close()
err = tx.Commit()
if err != nil {
return err
}
}
}
st.Close()
e := tx.Commit()
if e != nil {
if err != nil {
err = fmt.Errorf("%s, commit: %s", err, e)
} else {
err = e
}
}
if err != nil {
return err
}
return nil
}
func AddString(db *sql.DB, tablename string, toadd string, smart bool) error {
r := bufio.NewReader(strings.NewReader(toadd))
err := Populate(db, tablename, r, smart)
return err
}
func PopulateFromFile(db *sql.DB, tablename string, fname string, smart bool) error {
f, err := os.Open(fname)
if err != nil {
return err
}
defer f.Close()
r := bufio.NewReader(f)
err = Populate(db, tablename, r, smart)
return err
}
func Chainmark(db *sql.DB, tablename string, s string, l int, idxno int) (string, error) {
tidyret := func(s []string) string {
return strings.TrimSpace(strings.Join(s, " "))
}
if idxno > Maxindex {
return "", errors.New("Given index count is larger than the maximum allowable index")
}
if l > MaxWords {
return "", errors.New("Too many words requested")
}
rand.Seed(time.Now().UnixNano())
splitab := strings.Split(strings.TrimSpace(strings.ToLower(s)), " ")
for i := range splitab {
splitab[i] = strings.TrimFunc(splitab[i], trimsmart)
}
retab := make([]string, l+len(splitab))
copy(retab, splitab)
w := make([]string, idxno)
for i := range w {
w[i] = " "
}
if len(splitab) < idxno {
for i, elm := range splitab {
w[len(w)-i-1] = elm
retab[i] = elm
}
} else {
copy(w, splitab[len(splitab)-idxno:])
copy(retab, splitab)
}
for i := len(splitab); i < l+len(splitab); i++ {
qstr := fmt.Sprintf("from '%s' WHERE", tablename)
empty := true
tmpt := make(map[int]string)
if w[0] != " " {
qstr = fmt.Sprintf("%s idx%d=?", qstr, Maxindex-idxno+1)
empty = false
tmpt[len(tmpt)] = w[0]
}
for i := 1; i < idxno; i++ {
if w[i] != " " {
if !empty {
qstr = fmt.Sprintf("%s AND", qstr)
}
qstr = fmt.Sprintf("%s idx%d=?", qstr, Maxindex-idxno+i+1)
tmpt[len(tmpt)] = w[i]
empty = false
}
}
qstr = fmt.Sprintf("%s;", qstr)
tmps := make([]interface{}, len(tmpt))
for i := 0; i < len(tmpt); i++ {
tmps[i] = tmpt[i]
}
st, err := db.Prepare(fmt.Sprintf("SELECT count(word) %s", qstr))
if err != nil {
return tidyret(retab), fmt.Errorf("Couldn't prepare statement: SELECT count(word) %s: %s", qstr, err)
}
res, err := st.Query(tmps...)
if err != nil {
st.Close()
return tidyret(retab), fmt.Errorf("exec statement: SELECT count(word) %s: %s", qstr, err)
}
var cnt int
if res.Next() {
res.Scan(&cnt)
}
if cnt == 0 {
res.Close()
st.Close()
switch tidyret(retab[len(splitab):]) {
case "":
return tidyret(retab), ErrNoWords
default:
return tidyret(retab), ErrNotEnoughWords
}
}
res.Close()
st.Close()
st, err = db.Prepare(fmt.Sprintf("SELECT word %s", qstr))
if err != nil {
return tidyret(retab), fmt.Errorf("Couldn't prepare statement: SELECT word %s: %s", qstr, err)
}
res, err = st.Query(tmps...)
if err != nil {
res.Close()
st.Close()
return tidyret(retab), fmt.Errorf("exec statement: SELECT word %s: %s", qstr, err)
}
rnd := rand.Intn(cnt)
var c string
res.Next()
for i := 0; i < rnd-1; i++ {
if !res.Next() {
res.Close()
st.Close()
return tidyret(retab), res.Err()
}
}
for i := 0; i < idxno-1; i++ {
w[i] = w[i+1]
}
res.Scan(&c)
retab[i] = c
w[len(w)-1] = retab[i]
res.Close()
st.Close()
}
return tidyret(retab), nil
}
<file_sep>/markov.go
package main
import (
"flag"
"fmt"
_ "github.com/mattn/go-sqlite3"
markov "github.com/soul9/markov.go/libmarkov"
"os"
)
func main() {
fname := flag.String("c", "none", "Corpus file path")
startstring := flag.String("s", " ", "string to start with (defaults to space)")
dbname := flag.String("n", "markov", "table name")
dbfname := flag.String("d", "/tmp/testmarkov.sqlite3", "database file name")
idxlen := flag.Int("i", 7, "number of indexes to use")
smart := flag.Bool("m", false, "Smart mode: try and analyze test to detect sentences")
retlen := flag.Int("l", 20, "How many words to chain")
pop := flag.Bool("p", false, "Whether to populate the database or not")
flag.Parse()
fmt.Println(*fname, *startstring, *dbname, *dbfname, *idxlen, *smart, *retlen)
if *idxlen > markov.Maxindex {
fmt.Printf("Too many indexes, maximum is %d\n", markov.Maxindex)
os.Exit(1)
}
m, err := markov.NewMarkov(*dbfname, *dbname)
if err != nil {
fmt.Println("Can't open database file:", err)
os.Exit(1)
}
err = m.Open()
if err != nil {
fmt.Println("Open:", err)
os.Exit(1)
}
defer m.Close()
if *pop {
err = m.PopulateFromFile(*fname, *smart)
if err != nil {
fmt.Printf("%s\n", err)
os.Exit(1)
}
}
str, err := m.Chainmark(*startstring, *retlen, *idxlen)
if err != nil {
fmt.Printf("Error in chainmark: %s\n", err)
}
fmt.Printf("%s\n", str)
os.Exit(0)
}
<file_sep>/makecorp.bash
#!/bin/bash
zncprocess() {
# strip "[HH:MM:SS] " timestamps, drop "***" status lines, and remove "<nick> " prefixes
sed -r 's,^\[[0-9:]+] ,,g; /^\*\*\*/d; s,^<[^>]+> ,,g'
}
zncbasedir=$HOME/.znc/moddata/log
corp=/tmp/corps
for net in $zncbasedir/*; do
for chan in $net/*; do
for log in $chan/*; do
: # no-op; the find pipeline below does the actual work
done
done
done
find $zncbasedir -type f |xargs cat |zncprocess > $corp
|
037ed22ca54b1d90204ac424ce46b51abd4c4fce
|
[
"Go",
"Shell"
] | 4 |
Go
|
soul9/markov.go
|
3f31e547f3d9cda07b9e47e85dfff6fafeb91d48
|
3375422939f00573724cedb6af8b177d7d652abc
|
refs/heads/master
|
<file_sep><?php
namespace App\Http\Controllers\Handle;
use Illuminate\Http\Request;
use App\Http\Controllers\Controller;
use Illuminate\Support\Str;
use App\Http\Controllers\HandleController;
class VkController extends HandleController
{
public function index(Request $request) {
if($request->json('group_id') == env('VK_GROUP_ID') && $request->json('secret') == env("VK_SECRET")){
$action = Str::camel($request->json('type'));
if(method_exists(get_class($this), $action)){
return call_user_func([$this, $action], $request);
} else {
return "Method not allowed";
}
} else {
return "group_id or secret doesn't confirmed";
}
}
public function confirmation() {
return env("VK_CONFIRMATION_TOKEN");
}
public function messageNew(Request $request) {
return "ok";
}
}
<file_sep><?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
class HandleController extends Controller
{
//
}
<file_sep><?php
use App\Http\Controllers\Handle\VkController;
Route::post('vk', [VkController::class, 'index']);
|
e21f9dc60211ecce1dd15e52af233766ae0d31e7
|
[
"PHP"
] | 3 |
PHP
|
MrThursby/jarik
|
463fca132ac703da4319fa2c8bf54d9ef512aaa4
|
d8f0bbacee5f0f985d3d4a6fb270a034ab97ef20
|
refs/heads/master
|
<file_sep><?php
use App\Models\MedicalAppointment;
use App\Models\Patient;
use Illuminate\Http\Response;
use Laravel\Lumen\Testing\{
DatabaseMigrations,
DatabaseTransactions
};
class PatientWithMedicalAppointmentTest extends TestCase
{
use DatabaseTransactions;
private $table = 'medical_appointments';
public function getTestMedicalAppontmentData(){
return MedicalAppointment::factory()
->make()
->toArray();
}
/**
* @group index
* @group medical-appointment
* @group medical-appointment.index
* @group patient.medical-appointment
* @group patient.medical-appointment.index
*/
public function testIndex(){
$patient = Patient::has('medicalAppointments')
->with('medicalAppointments')
->inRandomOrder()
->first();
$this->json('GET', route('patient.medical-appointment.index', [
'patient' => $patient
]));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [],
'meta' => [
'pagination' => [
'total',
'count',
'per_page',
'current_page',
'total_pages',
'links'
]
]
]);
}
/**
* @group show
* @group medical-appointment
* @group medical-appointment.show
* @group patient.medical-appointment
* @group patient.medical-appointment.show
*/
public function testShow(){
$patient = Patient::has('medicalAppointments')
->with('medicalAppointments')
->inRandomOrder()
->first();
$this->json('GET', route('patient.medical-appointment.show', [
'patient' => $patient->id,
'id' => $patient->medicalAppointments->first()->id
]));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [
'id'
]
]);
}
/**
* @group store
* @group medical-appointment
* @group medical-appointment.store
* @group patient.medical-appointment
* @group patient.medical-appointment.store
*/
public function testStore(){
$payload = $this->getTestMedicalAppontmentData();
$patient = Patient::inRandomOrder()->first();
$this->json('POST', route('patient.medical-appointment.store', [
'patient' => $patient->id
]), $payload);
$this->seeStatusCode(Response::HTTP_CREATED);
$this->seeJsonStructure([
'data' => [
'id'
]
]);
$this->seeInDatabase($this->table, [
'patient_id' => $patient->id,
'record' => $payload['record']
]);
}
/**
* @group update
* @group medical-appointment
* @group medical-appointment.update
* @group patient.medical-appointment
* @group patient.medical-appointment.update
*/
public function testUpdate(){
$patient = Patient::has('medicalAppointments')
->with('medicalAppointments:id,patient_id')
->inRandomOrder()
->first();
$id = $patient->medicalAppointments
->first()
->id;
$payload = $this->getTestMedicalAppontmentData();
$this->json('PUT', route('patient.medical-appointment.update', [
'patient' => $patient->id,
'id' => $id
]), $payload);
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [
'id',
'created_at',
'updated_at'
]
]);
$this->seeInDatabase($this->table, [
'id' => $id,
'patient_id' => $patient->id,
'record' => $payload['record'],
]);
}
/**
* @group destroy
* @group medical-appointment
* @group medical-appointment.destroy
* @group patient.medical-appointment
* @group patient.medical-appointment.destroy
*/
public function testDestroy(){
$patient = Patient::has('medicalAppointments')
->with('medicalAppointments:id,patient_id')
->inRandomOrder()
->first();
$id = $patient->medicalAppointments
->first()
->id;
$this->json('DELETE', route('patient.medical-appointment.destroy', [
'patient' => $patient->id,
'id' => $id
]));
$this->seeStatusCode(Response::HTTP_NO_CONTENT);
$this->notSeeInDatabase($this->table, [
'id' => $id,
'patient_id' => $patient->id,
'deleted_at' => null
]);
}
/**
* @group validation
* @group medical-appointment
* @group medical-appointment.validation
* @group patient.medical-appointment
* @group patient.medical-appointment.validation
*/
public function testAssertRequiredData(){
$patient = Patient::has('medicalAppointments')
->with('medicalAppointments:id,patient_id')
->inRandomOrder()
->first();
$payload = $this->getTestMedicalAppontmentData();
$lastID = MedicalAppointment::select('id')
->orderBy('id', 'desc')
->first()
->id;
foreach ($payload as $key => $value) {
$payload[$key] = null;
}
$this->json('POST', route('patient.medical-appointment.store', [
'patient' => $patient->id
]), $payload);
$this->seeStatusCode(Response::HTTP_UNPROCESSABLE_ENTITY);
$this->seeJsonStructure(array_keys($payload));
$this->notSeeInDatabase($this->table, [
'id' => ($lastID + 1),
]);
}
}
<file_sep><?php
namespace App\Http\Controllers;
use App\Models\MedicalAppointment;
use App\Services\MedicalAppointmentService;
use App\Transformers\MedicalAppointmentTransformer;
use Illuminate\Database\Eloquent\ModelNotFoundException;
use Illuminate\Http\{Request, Response};
class MedicalAppointmentController extends Controller
{
public function __construct(MedicalAppointmentService $service){
$this->service = $service;
}
public function index(int $patient = null){
$medicalAppointments = $this->service->all(self::$resultsPerPage, $patient);
$data = $this->paginate($medicalAppointments, new MedicalAppointmentTransformer);
return response($data);
}
public function show(int $id, int $patient = null){
try{
$medicalAppointment = $this->service->findById($id, $patient);
$data = $this->item($medicalAppointment, new MedicalAppointmentTransformer);
return response($data);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => $e->getMessage()
], Response::HTTP_BAD_REQUEST);
}
}
public function store(Request $request, int $patient){
$this->validate($request, MedicalAppointment::rules());
$medicalAppointment = $this->service->store($patient, $request->all());
$data = $this->item($medicalAppointment, new MedicalAppointmentTransformer);
return response($data, 201);
}
public function update(Request $request, int $patient, int $id){
$this->validate($request, MedicalAppointment::rules());
try{
$patient = $this->service->update($patient, $id, $request->all());
return $this->item($patient, new MedicalAppointmentTransformer);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => 'Um erro ocorreu! Nossa equipe já foi avisada e está verificando.'
], Response::HTTP_BAD_REQUEST);
}
}
public function destroy(int $patient, int $id){
try{
$patient = $this->service->destroy($patient, $id);
return response(null, Response::HTTP_NO_CONTENT);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => $e->getMessage()
], Response::HTTP_BAD_REQUEST);
}
}
}
<file_sep><?php
namespace App\Http\Controllers;
use App\Models\Patient;
use App\Services\PatientService;
use App\Transformers\PatientTransformer;
use Illuminate\Database\Eloquent\ModelNotFoundException;
use Illuminate\Http\{Request, Response};
class PatientController extends Controller
{
public function __construct(PatientService $service){
$this->service = $service;
}
public function index(){
$patients = $this->service->all(self::$resultsPerPage);
$data = $this->paginate($patients, new PatientTransformer);
return response($data);
}
public function show(int $id){
try{
$patient = $this->service->findById($id);
$data = $this->item($patient, new PatientTransformer);
return response($data);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => $e->getMessage()
], Response::HTTP_BAD_REQUEST);
}
}
public function store(Request $request){
$this->validate($request, Patient::rules());
$patient = $this->service->store($request->all());
$data = $this->item($patient, new PatientTransformer);
return response($data, 201);
}
public function update(Request $request, int $id){
$this->validate($request, Patient::rules());
try{
$patient = $this->service->update($id, $request->all());
return $this->item($patient, new PatientTransformer);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => 'Um erro ocorreu! Nossa equipe já foi avisada e está verificando.'
], Response::HTTP_BAD_REQUEST);
}
}
public function destroy(int $id){
try{
$this->service->destroy($id);
return response(null, Response::HTTP_NO_CONTENT);
} catch(ModelNotFoundException $e) {
return response(null, Response::HTTP_NOT_FOUND);
} catch(\Exception $e) {
return response([
'message' => $e->getMessage()
], Response::HTTP_BAD_REQUEST);
}
}
}
<file_sep><?php
namespace App\Http\Controllers;
use App\Transformers\Transformer;
use Illuminate\Contracts\Pagination\LengthAwarePaginator;
use Illuminate\Http\Request;
use League\Fractal\{
Manager,
Pagination\IlluminatePaginatorAdapter,
Resource\Collection,
Resource\Item
};
use Laravel\Lumen\Routing\Controller as BaseController;
class Controller extends BaseController
{
protected static $resultsPerPage = 25;
private function getFractalManager()
{
$manager = new Manager();
if (isset($_GET['include'])) {
$manager->parseIncludes($_GET['include']);
}
return $manager;
}
public function item($data, Transformer $transformer)
{
$manager = $this->getFractalManager();
$resource = new Item($data, $transformer, $transformer->type);
return $manager->createData($resource)->toArray();
}
public function collection($data, Transformer $transformer)
{
$manager = $this->getFractalManager();
$resource = new Collection($data, $transformer, $transformer->type);
return $manager->createData($resource)->toArray();
}
public function paginate(LengthAwarePaginator $data, Transformer $transformer)
{
$manager = $this->getFractalManager();
$resource = new Collection($data, $transformer, $transformer->type);
$resource->setPaginator(new IlluminatePaginatorAdapter($data));
return $manager->createData($resource)->toArray();
}
}
<file_sep><?php
return [
'name' => env('APP_NAME'),
'env' => env('APP_ENV'),
'debug' => env('APP_DEBUG'),
'url' => env('APP_URL'),
'timezone' => env('APP_TIMEZONE'),
'locale' => 'pt_BR',
'fallback_locale' => 'en',
'key' => '',
'cipher' => 'AES-256-CBC',
'faker_locale' => 'pt_BR'
];<file_sep><?php
namespace App\Models;
use Illuminate\Database\Eloquent\{
Model,
SoftDeletes,
Factories\HasFactory
};
class MedicalAppointment extends Model
{
use HasFactory, SoftDeletes;
public static function rules(){
return [
'record' => 'required|string|max:1000',
];
}
protected $fillable = [
'record'
];
protected static function boot(){
parent::boot();
static::addGlobalScope('order', function($builder){
$builder->orderBy('updated_at', 'desc');
});
}
public function patient(){
return $this->belongsTo(Patient::class);
}
}
<file_sep><?php
namespace App\Models;
use Carbon\Carbon;
use Illuminate\Database\Eloquent\{
Model,
SoftDeletes,
Factories\HasFactory
};
class Patient extends Model
{
use HasFactory, SoftDeletes;
protected $casts = [
'birthdate' => 'date:d/m/Y'
];
public static function rules(){
$genders = implode(',', self::$genders);
return [
'name' => 'required|string|max:255',
'birthdate' => 'required|date_format:d/m/Y',
'gender' => "required|string|in:{$genders}",
'document' => 'required|string|cpf|unique:patients,document',
];
}
public static $genders = [
'M',
'F',
'T',
'O'
];
protected $fillable = [
'name',
'birthdate',
'gender',
'document'
];
protected static function boot(){
parent::boot();
static::addGlobalScope('order', function($builder){
$builder->orderBy('name');
});
}
public function medicalAppointments(){
return $this->hasMany(MedicalAppointment::class);
}
public function setBirthdateAttribute(string $birthdate){
$this->attributes['birthdate'] = Carbon::createFromFormat('d/m/Y', $birthdate);
}
}
<file_sep><?php
namespace App\Services;
use App\Models\Patient;
class PatientService
{
public function all(int $resultsPerPage){
return Patient::paginate($resultsPerPage);
}
public function findById(int $id){
return Patient::findOrFail($id);
}
public function store(array $data){
return Patient::create($data);
}
public function update(int $id, array $data){
$patient = $this->findById($id);
$patient->update($data);
return $patient;
}
public function destroy(int $id){
$patient = $this->findById($id);
$patient->delete();
return $patient;
}
}<file_sep><?php
namespace App\Services;
use App\Models\{Patient, MedicalAppointment};
class MedicalAppointmentService
{
public function all(int $resultsPerPage, int $patient = null){
if( is_null($patient) ){
return MedicalAppointment::paginate($resultsPerPage);
}
return Patient::findOrFail($patient)
->medicalAppointments()
->paginate($resultsPerPage);
}
public function findById(int $id, int $patient = null){
if( is_null($patient) ){
return MedicalAppointment::findOrFail($id);
}
return Patient::findOrFail($patient)
->medicalAppointments()
->findOrFail($id);
}
public function store(int $patient, array $data){
$patient = Patient::findOrFail($patient)
->medicalAppointments()
->create($data);
return $patient;
}
public function update(int $patient, int $id, array $data){
$medicalAppointment = $this->findById($id, $patient);
$medicalAppointment->update($data);
return $medicalAppointment;
}
public function destroy(int $patient, int $id){
$medicalAppointment = $this->findById($id, $patient);
$medicalAppointment->delete();
return $medicalAppointment;
}
}<file_sep><?php
use App\Models\MedicalAppointment;
use App\Models\Patient;
use Illuminate\Http\Response;
use Laravel\Lumen\Testing\{
DatabaseMigrations,
DatabaseTransactions
};
class MedicalAppointmentTest extends TestCase
{
use DatabaseTransactions;
private $table = 'medical_appointments';
public function getTestMedicalAppontmentData(){
return MedicalAppointment::factory()
->make()
->toArray();
}
/**
* @group index
* @group medical-appointment
* @group medical-appointment.index
*/
public function testIndex(){
$this->json('GET', route('medical-appointment.index'));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [],
'meta' => [
'pagination' => [
'total',
'count',
'per_page',
'current_page',
'total_pages',
'links'
]
]
]);
}
/**
* @group show
* @group medical-appointment
* @group medical-appointment.show
*/
public function testShow(){
$id = MedicalAppointment::inRandomOrder()
->first()
->id;
$this->json('GET', route('medical-appointment.show', [
'id' => $id,
]));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [
'id'
]
]);
}
}
<file_sep># ProRadis
## Docker
Used for the database. It contains a MySQL 5.6 image.
## Running the project
php -S localhost:9000 -t public
## Running the tests
composer test<file_sep><?php
use App\Models\Patient;
use Illuminate\Http\Response;
use Laravel\Lumen\Testing\{
DatabaseMigrations,
DatabaseTransactions
};
class PatientTest extends TestCase
{
use DatabaseTransactions;
private $table = 'patients';
public function getTestPacientData(){
return Patient::factory()
->make()
->toArray();
}
/**
* @group index
* @group patient
* @group patient.index
*/
public function testIndex(){
$this->json('GET', route('patient.index'));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data',
'meta' => [
'pagination' => [
'total',
'count',
'per_page',
'current_page',
'total_pages',
'links'
]
]
]);
}
/**
* @group show
* @group patient
* @group patient.show
*/
public function testShow(){
$id = Patient::select('id')
->inRandomOrder()
->first()
->id;
$this->json('GET', route('patient.show', [
'id' => $id
]));
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [
'id'
]
]);
}
/**
* @group store
* @group patient
* @group patient.store
*/
public function testStore(){
$payload = $this->getTestPacientData();
$this->json('POST', route('patient.store'), $payload);
$this->seeStatusCode(Response::HTTP_CREATED);
$this->seeJsonStructure([
'data' => [
'id'
]
]);
$this->seeInDatabase('patients', [
'name' => $payload['name'],
'document' => $payload['document']
]);
}
/**
* @group update
* @group patient
* @group patient.update
*/
public function testUpdate(){
$id = Patient::select('id')
->inRandomOrder()
->first()
->id;
$payload = $this->getTestPacientData();
$this->json('PUT', route('patient.update', [
'id' => $id
]), $payload);
$this->seeStatusCode(Response::HTTP_OK);
$this->seeJsonStructure([
'data' => [
'id',
'created_at',
'updated_at'
]
]);
$this->seeInDatabase('patients', [
'id' => $id,
'name' => $payload['name'],
'document' => $payload['document']
]);
}
/**
* @group destroy
* @group patient
* @group patient.destroy
*/
public function testDestroy(){
$id = Patient::select('id')
->inRandomOrder()
->first()
->id;
$this->json('DELETE', route('patient.destroy', [
'id' => $id
]));
$this->seeStatusCode(Response::HTTP_NO_CONTENT);
$this->notSeeInDatabase('patients', [
'id' => $id,
'deleted_at' => null
]);
}
/**
* @group validation
* @group patient
* @group patient.validation
*/
public function testAssertRequiredData(){
$payload = $this->getTestPacientData();
$lastID = Patient::select('id')
->orderBy('id', 'desc')
->first()
->id;
foreach ($payload as $key => $value) {
$payload[$key] = null;
}
$this->json('POST', route('patient.store'), $payload);
$this->seeStatusCode(Response::HTTP_UNPROCESSABLE_ENTITY);
$this->seeJsonStructure(array_keys($payload));
$this->notSeeInDatabase('patients', [
'id' => ($lastID + 1),
]);
}
}
<file_sep><?php
namespace App\Transformers;
use Illuminate\Database\Eloquent\Model;
use League\Fractal\TransformerAbstract;
abstract class Transformer extends TransformerAbstract
{
public $type = 'unknown';
protected $fields;
abstract public function transform(Model $model);
}<file_sep><?php
namespace App\Transformers;
use App\Models\MedicalAppointment;
class MedicalAppointmentTransformer extends Transformer
{
protected $availableIncludes = [
'patient'
];
public function transform($medicalAppointment){
return [
'id' => (int)$medicalAppointment->id,
'record' => $medicalAppointment->record,
'patient_id' => $medicalAppointment->patient_id,
'created_at' => $medicalAppointment->created_at,
'updated_at' => $medicalAppointment->updated_at,
'deleted_at' => $medicalAppointment->deleted_at
];
}
public function includePatient(MedicalAppointment $medicalAppointment)
{
$patient = $medicalAppointment->patient;
return $this->item($patient, new PatientTransformer);
}
}<file_sep><?php
/** @var \Laravel\Lumen\Routing\Router $router */
/*
|--------------------------------------------------------------------------
| Application Routes
|--------------------------------------------------------------------------
|
| Here is where you can register all of the routes for an application.
| It is a breeze. Simply tell Lumen the URIs it should respond to
| and give it the Closure to call when that URI is requested.
|
*/
$router->get('/', function () use ($router) {
return $router->app->version();
});
$router->group([
'prefix' => '/patients',
], function () use ($router) {
$router->get('/', [
'as' => 'patient.index',
'uses' => 'PatientController@index'
]);
$router->get('/{id}', [
'as' => 'patient.show',
'uses' => 'PatientController@show'
]);
$router->post('/', [
'as' => 'patient.store',
'uses' => 'PatientController@store'
]);
$router->put('/{id}', [
'as' => 'patient.update',
'uses' => 'PatientController@update'
]);
$router->delete('/{id}', [
'as' => 'patient.destroy',
'uses' => 'PatientController@destroy'
]);
$router->group([
'prefix' => '{patient}/medical-appointments',
], function () use ($router) {
$router->get('/', [
'as' => 'patient.medical-appointment.index',
'uses' => 'MedicalAppointmentController@index'
]);
$router->get('/{id}', [
'as' => 'patient.medical-appointment.show',
'uses' => 'MedicalAppointmentController@show'
]);
$router->post('/', [
'as' => 'patient.medical-appointment.store',
'uses' => 'MedicalAppointmentController@store'
]);
$router->put('/{id}', [
'as' => 'patient.medical-appointment.update',
'uses' => 'MedicalAppointmentController@update'
]);
$router->delete('/{id}', [
'as' => 'patient.medical-appointment.destroy',
'uses' => 'MedicalAppointmentController@destroy'
]);
});
});
$router->group([
'prefix' => 'medical-appointments',
], function () use ($router) {
$router->get('/', [
'as' => 'medical-appointment.index',
'uses' => 'MedicalAppointmentController@index'
]);
$router->get('/{id}', [
'as' => 'medical-appointment.show',
'uses' => 'MedicalAppointmentController@show'
]);
});<file_sep><?php
namespace App\Transformers;
use App\Models\Patient;
class PatientTransformer extends Transformer
{
protected $availableIncludes = [
'medicalAppointments'
];
public function transform($patient){
return [
'id' => (int)$patient->id,
'name' => $patient->name,
'birthdate' => $patient->birthdate,
'gender' => $patient->gender,
'document' => $patient->document,
'created_at' => $patient->created_at,
'updated_at' => $patient->updated_at,
'deleted_at' => $patient->deleted_at
];
}
public function includeMedicalAppointments(Patient $patient)
{
$medicalAppointments = $patient->medicalAppointments;
return $this->collection($medicalAppointments, new MedicalAppointmentTransformer);
}
}
|
e4690e4c1e2d5bc016dedce75ebae4d95f858ae3
|
[
"Markdown",
"PHP"
] | 16 |
PHP
|
waverapha/proradis-back
|
5899d7672c0d4b292b12f0a786816b663e736b93
|
fe08c7028f6cd8851770be29688a2b6a388aaa58
|
refs/heads/master
|
<repo_name>colebryant/bootcamp-frontend<file_sep>/jquery/jquery-lesson/README.md
# Let's learn some jQuery!
1. Clone this repo.
1. Code along as much as you can, but if you get lost don't worry! I'll post the updated code at the end of the lesson.
1. WOOOOOOOOOOOOOOO!!
_Thank you, <NAME>, for building this wonderful jQuery lesson for NSS_
<file_sep>/react/student-exercise/src/components/Students.js
import React, { Component } from "react";
export default class Students extends Component {
render() {
return (
<React.Fragment>
<h1>These are the students:</h1>
{
this.props.students.map(student =>
<p key={student.id}>{student.firstName} {student.lastName}</p>
)
}
</React.Fragment>
)
}
}
<file_sep>/nutshell/README.md

# Nutshell: The Information Dashboard
Nutshell is a new product offering that our group at NSS was tasked with building. It's a dashboard people can use to organize their daily tasks, events, news articles, friends, and chat messages. I worked on the events feature in addition to the ERD.
### Accessing the App
To run Nutshell locally, create an empty directory and clone the project by running the following command in your terminal: ``` git clone <EMAIL>:colebryant/nutshell-divine-madness.git ```
Go to the ```src/lib``` folder in the project and run: ```npm init```
Run the following command to install all libraries and their dependencies: ```npm install```
To run Nutshell in the browser, run: ```grunt```
Open your preferred browser and go to: [http://localhost:8080/]( http://localhost:8080/)
In another terminal window, navigate into ```src/api``` and run: ```json-server -p 8088 -w database.json```
### We covered a number of topics during the creation of Nutshell, including:
1. Functions
1. Databases/API
1. Github
1. Objects
1. CSS
1. Handling user events
1. Factory functions
1. Data entry/editing
1. Modular code with Browserify
1. Relational data
### Contributors:
* [<NAME>]( https://github.com/jdbbaugh)
* [<NAME>]( https://github.com/JordanRosas)
* [<NAME>]( https://github.com/colebryant)
* [<NAME>]( https://github.com/alagrad94)
* [JD Wheeler]( https://github.com/loetek)
<file_sep>/react/kennel/src/components/location/LocationDetail.js
import React, { Component } from "react"
import "./LocationList"
export default class LocationDetail extends Component {
render() {
const location = this.props.location.find(a => a.id === parseInt(this.props.match.params.locationId)) || {}
return (
<section className="location">
<div key={location.id} className="card">
<div className="card-body">
<h4 className="card-title">
</h4>
</div>
</div>
</section>
)
}
}<file_sep>/javascript/array-methods/search.js
// Lightning Exercise 1: Refactor your code to search for purchasing agents instead.
// If the search text is found in the first name of any purchasing agent, show that agent.
// Lightning Exercise 2: Refactor your code so that if the search text is found in the first name, or last name,
// of any purchasing agent, show that agent.
document
.querySelector("#agentSearch")
.addEventListener("keypress", keyPressEvent => {
if (keyPressEvent.charCode === 13) {
/* WHEN USER PRESSES ENTER, FIND MATCHING BUSINESS (13 IS THE ENTER KEY CODE) */
const foundBusiness = businesses.find(
business =>
business.purchasingAgent.nameFirst.includes(keyPressEvent.target.value) ||
business.purchasingAgent.nameLast.includes(keyPressEvent.target.value)
);
outEl.innerHTML = `
<h2>
${foundBusiness.purchasingAgent.nameFirst} ${foundBusiness.purchasingAgent.nameLast}
</h2>
<section>
${foundBusiness.companyName}
</section>
<section>
${foundBusiness.addressFullStreet}
</section>
<section>
${foundBusiness.addressCity},
${foundBusiness.addressStateCode}
${foundBusiness.addressZipCode}
</section>
`;
}
});
<file_sep>/javascript/array-methods/dotard.js
// Lightning Exercise: Add another section sibling to the current one and use object dot notation to display each company's city.
// Use square bracket notation to display the state code. Use dynamic square bracket notation to add the zip code.
const zipVar = "addressZipCode";
const outEl = document.querySelector("#output")
outEl.innerHTML = "<h1>Active Businesses</h1>"
businesses.forEach(business => {
outEl.innerHTML += `
<h2>${business.companyName}</h2>
<section>
${business.addressFullStreet}
</section>
<section>
${business.addressCity}, ${business["addressStateCode"]} ${business[zipVar]}
</section>
`
outEl.innerHTML += "<hr/>"
});
const newYorkBusinesses = businesses.filter(business => business.addressStateCode === "NY");
console.log(newYorkBusinesses);
// Lightning Exercise: Use filter() to create another array named manufacturingBusinesses that will contain all businesses in the manufacturing industry.
// Display those to the DOM.
const manufacturingBusinesses = businesses.filter(business => business.companyIndustry === "Manufacturing");
outEl.innerHTML += "<h1>Manufacturing Businesses</h1>";
manufacturingBusinesses.forEach(business => {
outEl.innerHTML += `
<h2>${business.companyName}</h2>
<section>
${business.addressFullStreet}
</section>
<section>
${business.addressCity}, ${business["addressStateCode"]} ${business[zipVar]}
</section>
`;
outEl.innerHTML += `<hr/>`;
});<file_sep>/react/student-exercise/src/App.js
import React, { Component } from 'react';
import { Route } from 'react-router-dom';
import APIManager from './modules/APIManager';
import Students from './components/Students';
import './App.css';
export default class App extends Component {
state = {
students: []
}
componentDidMount() {
APIManager.getAll()
.then(students => this.setState({
students: students
}))
}
render() {
return (
<React.Fragment>
<Route path="/students" render={() => {
return <Students students={this.state.students} />
}} />
</React.Fragment>
);
}
}
<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/Review.js
// This module creates the review component to be appended to the DOM
const Review = {
createReviewComponent (reviewObject) {
let docuFrag = document.createDocumentFragment();
let reviewTitle = document.createElement("h3");
reviewTitle.textContent = `Review: ${reviewObject.reviewTitle}`;
let reviewText = document.createElement("p");
reviewText.textContent = reviewObject.reviewText;
docuFrag.appendChild(reviewTitle);
docuFrag.appendChild(reviewText);
let reviewOutput = document.querySelector(`.pieReview${reviewObject.productId}`);
reviewOutput.appendChild(docuFrag);
}
};
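// Illustrative sketch (not part of the original module): createReviewComponent expects
// an object with productId, reviewTitle, and reviewText, and appends the built nodes to
// an element carrying the class `pieReview<productId>`. The sample below is hypothetical
// and the call stays commented out so nothing runs unless that element exists.
const sampleReview = {
    productId: 1,
    reviewTitle: "Great pie",
    reviewText: "Crisp crust, would order again."
};
// Review.createReviewComponent(sampleReview);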
export default Review;<file_sep>/javascript/food-api/src/scripts/foodapi.js
fetch("http://localhost:8088/food")
.then(foods => foods.json())
.then(parsedFoods => {
parsedFoods.forEach(food => {
fetch(`https://world.openfoodfacts.org/api/v0/product/${food.barcode}.json`)
.then(productInfo => productInfo.json())
.then(parsedProductInfo => {
const ingredientList = parsedProductInfo.product.ingredients_text;
const country = parsedProductInfo.product.countries;
const calories = parsedProductInfo.product.nutriments.energy_serving;
const fat = parsedProductInfo.product.nutriments.fat_serving;
const sugar = parsedProductInfo.product.nutriments.sugars_serving;
const foodAsHTML = createHTML(food, ingredientList, country, calories, fat, sugar);
addFoodToDom(foodAsHTML);
});
});
});
const createHTML = (food, ingredientList, country, calories, fat, sugar) => {
const div = document.createElement("div");
const h1 = document.createElement("h1");
h1.textContent = food.name;
const p1 = document.createElement("p");
p1.textContent = `Food Type: ${food.type}`;
const p2 = document.createElement("p");
p2.textContent = `Ethnicity: ${food.ethnicity}`;
const p3 = document.createElement("p");
p3.textContent = `Ingredients: ${ingredientList}`;
const p4 = document.createElement("p");
// Can't seem to find country of origin in json files so just pulling "country"
p4.textContent = `Country: ${country}`;
const p5 = document.createElement("p");
p5.textContent = `Calories per Serving (kCal): ${calories}`;
const p6 = document.createElement("p");
p6.textContent = `Fat per Serving (g): ${fat}`;
const p7 = document.createElement("p");
p7.textContent = `Sugar per Serving (g): ${sugar}`;
div.appendChild(h1);
div.appendChild(p1);
div.appendChild(p2);
div.appendChild(p3);
div.appendChild(p4);
div.appendChild(p5);
div.appendChild(p6);
div.appendChild(p7);
return div;
};
const addFoodToDom = (foodAsHTML) => {
const headSection = document.querySelector(".foodList");
headSection.appendChild(foodAsHTML);
};<file_sep>/browserify-practice/browserify-boilerplate/src/scripts/main.js
/*
Author: your name here
Name: main.js
Purpose: Entry point of our application
*/
import createCar from "./createCar"
import createGarage from "./createGarage"
// Create four cars using the function you imported.
// Each of these variables contains an object that the factory function returns
const mustang = createCar("Ford", "Mustang")
const accord = createCar("Honda", "Accord")
const santafe = createCar("Hyundai", "Santa Fe")
const sierra = createCar("GMC", "Sierra")
// Make a new garage and store cars in it
const garage = createGarage() // Remember, this function returns an object
garage.store(mustang)
garage.store(accord)
garage.store(santafe)
garage.store(sierra)
console.table(garage.getInventory())
console.table(garage.retrieve(sierra))
<file_sep>/javascript/objects-challenge/scripts.js
const elizabethSinger = {
congressionalDistrict: "Nashville",
platformStatements: {
taxes: "Taxes are lame but we need money",
jobs: "Jobs are good",
infrastructure: "We need it",
healthcare: "Gotta get healthier, folks",
crimeAndEnforcement: "Good police officers who do their job"
},
donationURL: "https//www.wikipedia.org",
calendarOfEvents: [
{
date: "11/26/2018",
time: "5:00 pm",
name: "Eat a donut",
purpose: "To eat a donut with Elizabeth Singer"
},
{
date: "11/27/2018",
time: "4:00 pm",
name: "Give money to Elizabeth",
purpose: "Allow Elizabeth to have more money for her campaign"
},
{
date: "11/27/2018",
time: "4:00 am",
name: "Questions with Elizabeth",
purpose: "Ask any questions you want to Elizabeth"
}
],
volunteerInformation: [
{
name: "Bill",
address: "Across the way",
email: "<EMAIL>",
phoneNumber: "111-111-1111",
availability: "always",
duties: "Answering phone calls"
},
{
name: "Mary",
address: "In Nashville",
email: "<EMAIL>",
phoneNumber: "111-111-1111",
availability: "always",
duties: "Fundraising"
},
{
name: "Jerry",
address: "Not answering",
email: "<EMAIL>",
phoneNumber: "111-111-1111",
availability: "never",
duties: "Being on the volunteer list"
}
],
biography: "Elizabeth Singer was born to lead. After being raised by coyotes in the mountains of East Tennessee, she grew to lead them herself. Then she moved to Nashville to become a congresswoman.",
imageGallery: [
"https://upload.wikimedia.org/wikipedia/commons/7/7d/Marie_Antoinette_by_Joseph_Ducreux.jpg",
"https://upload.wikimedia.org/wikipedia/commons/b/b2/The_Royal_Family_of_France_in_1781_by_an_anonymous_artist.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5a/Josef_Hauzinger_002.jpg/330px-Josef_Hauzinger_002.jpg"
],
missionStatement: "To put meat on the table for Nashville",
votingURL: "https://www.google.com"
}
// Below are the functions used to change the existing property values or to add new sub-properties depending on the property
function changeDistrict(newDistrict) {
elizabethSinger.congressionalDistrict = newDistrict;
}
function changePlatform(topic, newPlatformStatement) {
elizabethSinger.platformStatements[topic] = newPlatformStatement;
}
function changeDonationURL(newDonationURL) {
elizabethSinger.donationURL = newDonationURL;
}
function addToCalendarofEvents(dateEntry, timeEntry, nameEntry, purposeEntry) {
elizabethSinger.calendarOfEvents.push({
date: dateEntry,
time: timeEntry,
name: nameEntry,
purpose: purposeEntry,
})
}
function addToVolunteerList(nameEntry, addressEntry, emailEntry, phoneEntry, availEntry, dutiesEntry) {
elizabethSinger.volunteerInformation.push({
name: nameEntry,
address: addressEntry,
email: emailEntry,
phoneNumber: phoneEntry,
availability: availEntry,
duties: dutiesEntry
})
}
function changeBiography(newBiography) {
elizabethSinger.biography = newBiography;
}
function addImageToGallery (newImageURL) {
elizabethSinger.imageGallery.push(newImageURL);
}
function changeMissionStatement (newStatement) {
elizabethSinger.missionStatement = newStatement;
}
function newVotingURL (newURL) {
elizabethSinger.votingURL = newURL;
}
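// Usage sketch (added for illustration): how the setter functions above might be called.
// The argument values are hypothetical; they are wrapped in a function that is never
// invoked here, so elizabethSinger is not mutated just by loading the script.
const runUsageExamples = () => {
    changeDistrict("Memphis");
    changePlatform("taxes", "Taxes are still lame, but roads need paving");
    addToCalendarofEvents("12/01/2018", "6:00 pm", "Town hall", "Answer voter questions");
    addImageToGallery("https://upload.wikimedia.org/wikipedia/commons/7/7d/Marie_Antoinette_by_Joseph_Ducreux.jpg");
    console.log(elizabethSinger);
};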
<file_sep>/kennel-company/src/Kennel.js
import React, { Component } from "react";
import EmployeeList from "./EmployeeList";
import LocationList from "./LocationList";
import AnimalList from "./AnimalList";
export default class Kennel extends Component {
state = {
employees: [
{ id: 1, name: "<NAME>" },
{ id: 2, name: "<NAME>" },
{ id: 3, name: "<NAME>" },
{ id: 4, name: "<NAME>" }
],
locations: [
{ id: 1, name: "Nashville North" },
{ id: 2, name: "Nashville South" }
],
animals: [
{ id: 1, name: "Doodles" },
{ id: 2, name: "Jack" },
{ id: 3, name: "Angus" },
{ id: 4, name: "Henley" },
{ id: 5, name: "Derkins" },
{ id: 6, name: "Checkers" }
]
};
render() {
return (
<React.Fragment>
<LocationList locations={this.state.locations} />
<EmployeeList employees={this.state.employees} />
<AnimalList animals={this.state.animals} />
</React.Fragment>
);
}
}
<file_sep>/javascript/database-small-business/src/scripts/main.js
import businessData from "./module1";
/*
Once your data is normalized, use your DOM skills to display a card for each employee.
It should display the employee name, the name of their department, and which computer they are using.
<article class="employee">
<header class="employee__name">
<h1><NAME></h1>
</header>
<section class="employee__department">
Works in the IT department
</section>
<section class="employee__computer">
Currently using a 2015 MacBook Pro
</section>
</article>
*/
const buildDom = () => {
const output = $(".output");
businessData.employees.forEach(employee => {
const cardContainer = $("<article>").addClass("employee");
const cardHeader = $("<header>").addClass("employee__name").text(employee.name);
cardHeader.appendTo(cardContainer);
const cardDept = $("<section>").addClass("employee__department").text(`Works in the ${businessData.departments[employee.departmentId-1].name} department`);
cardDept.appendTo(cardContainer);
const cardComp = $("<section>").addClass("employee__computer").text(`Currently using a ${businessData.computers[employee.computerId-1].type}`);
cardComp.appendTo(cardContainer);
cardContainer.appendTo(output);
});
};
buildDom();
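// Illustrative note (not part of the original exercise): buildDom above resolves foreign
// keys with index arithmetic (businessData.departments[employee.departmentId - 1]), which
// only holds while ids stay sequential and 1-based. Looking records up by id is the safer
// join; the helper below is a hypothetical alternative and is not called anywhere.
const findById = (collection, idKey, id) => collection.find(item => item[idKey] === id);
// Example: findById(businessData.departments, "departmentId", 3) returns the Accounting department.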
<file_sep>/javascript/legoBricks/src/scripts/main.js
domBuilder.appendInputForm();
domBuilder.appendDropDown();
let legoBtn = document.querySelector(".lego__save");
// console.log(legoBtn);
legoBtn.addEventListener("click", eventListeners.handleFormSubmission);<file_sep>/javascript/array-methods-spam/spamSpamSpam.js
const newArray = customers.map(customer => {
return customer.contacts.email.join(", ");
});
console.log(newArray);<file_sep>/javascript/javascript-components/scripts.js
const students = [
{
name: "<NAME>",
class: "History",
info: "Failed last exam",
score: 59
},
{
name: "<NAME>",
class: "History",
info: "Has completed all homework",
score: 91
},
{
name: "<NAME>",
class: "History",
info: "Wonderful at helping other students",
score: 88
},
{
name: "<NAME>",
class: "History",
info: "Has never missed a class or exam",
score: 92
},
{
name: "<NAME>",
class: "History",
info: "Sub-par performance all around",
score: 64
},
{
name: "<NAME>",
class: "History",
info: "Wonderful student",
score: 97
},
{
name: "<NAME>",
class: "History",
info: "Smokes too much. Distracting.",
score: 76
},
{
name: "<NAME>",
class: "History",
info: "Falls asleep in class",
score: 79
},
{
name: "<NAME>",
class: "History",
info: "Talks too much",
score: 83
},
{
name: "<NAME>",
class: "History",
info: "Asks pointless, unrelated questions",
score: 78
},
{
name: "<NAME>",
class: "History",
info: "When was the last time he attended class?",
score: 48
},
{
name: "<NAME>",
class: "History",
info: "Needs to contribute to in-class discussions",
score: 95
}
]
const h1 = (...props) => {
const h1Element = document.createElement('h1');
h1Element.textContent = props[0];
h1Element.classList.add("xx-large", props[1]);
return h1Element;
}
const section = (...props) => {
const sectionElement = document.createElement("section");
sectionElement.textContent = props[0];
sectionElement.classList.add(props[1]);
return sectionElement;
}
const aside = (...props) => {
const asideElement = document.createElement("aside");
asideElement.textContent = props[0];
asideElement.classList.add("bordered", "dashed", props[1]);
return asideElement;
}
const div = (...props) => {
const divElement = document.createElement("div");
divElement.classList.add(props[0]);
return divElement;
}
// Below: attempting to create universal function
// const addElement = (...props) => {
// const elementToAdd = document.createElement()
// }
for (let i = 0; i < students.length; i++) {
let studentComponent = document.getElementById("container");
let divElement = div("student");
studentComponent.appendChild(divElement);
if (students[i].score >= 60) {
divElement.appendChild(h1(students[i].name, "passing"));
divElement.appendChild(section(students[i].class, "section--padded"));
divElement.appendChild(aside(students[i].info, "pushRight"));
} else {
divElement.appendChild(h1(students[i].name, "failing"));
divElement.appendChild(section(students[i].class, "section--padded"));
divElement.appendChild(aside(students[i].info, "pushRight"));
}
}<file_sep>/ternary-traveler/src/scripts/data.js
// The purpose of this module is to provide a template for fetch requests to database.json
const data = {
fetchRequest (fetchObject) {
let dataSet = fetchObject.dataSet;
let fetchType = fetchObject.fetchType;
let specificId = fetchObject.specificId;
let databaseObject = fetchObject.databaseObject;
if (fetchType === "GET" && specificId) {
return fetch(`http://localhost:8088/${dataSet}/${specificId}`)
.then(response => response.json());
} else if (fetchType === "GET") {
return fetch(`http://localhost:8088/${dataSet}`)
.then(response => response.json());
} else if (fetchType === "POST") {
return fetch (`http://localhost:8088/${dataSet}`, {
method: fetchType,
headers: {
"Content-Type": "application/json; charset=utf-8"
},
body: JSON.stringify(databaseObject)
})
} else if (fetchType === "DELETE") {
return fetch(`http://localhost:8088/${dataSet}/${specificId}`, {
method: fetchType,
headers: {
"Content-Type": "application/json; charset=utf-8"
}
})
} else if (fetchType === "PUT") {
return fetch(`http://localhost:8088/${dataSet}/${specificId}`, {
method: fetchType,
headers: {
"Content-Type": "application/json; charset=utf-8"
},
body: JSON.stringify(databaseObject)
})
} else {
console.log("fetch didn't work");
}
}
};
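// Usage sketch (illustrative): fetchRequest above is a single entry point keyed by the
// fetchType field. The calls below show the expected shape of the options object; the
// dataSet name and record values are hypothetical, and the wrapper function is never invoked.
const exampleRequests = () => {
    // GET an entire collection (already parsed to JSON by fetchRequest)
    data.fetchRequest({ dataSet: "places", fetchType: "GET" })
        .then(places => console.log(places));
    // POST a new record; the raw Response comes back, so parse it here if needed
    data.fetchRequest({
        dataSet: "places",
        fetchType: "POST",
        databaseObject: { name: "Example place", cost: 10 }
    }).then(response => response.json());
};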
export default data;<file_sep>/javascript/datetime-exercise/main.js
/* Lightning Exercise
Setup a simple application with just index.html and main.js (no browserify/grunt and no React).
โ
1) This application should display the current date & time on the DOM
2) There should be a button that when clicked, updates the current date & time on the DOM
3) Push it up to Github
4) Once you are finished, slack Emily or Leah your repo link
โ
*notes: this is a simple web application, so you will need to run the http server */
const output = document.getElementById("output");
const dateTimeElement = document.createElement("p");
let initialDateTime = new Date();
let initialYear = initialDateTime.getFullYear();
let initialMonth = initialDateTime.getMonth();
let initialDay = initialDateTime.getDate();
const months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
let initialMonthName = months[initialMonth];
let initialHour = initialDateTime.getHours();
let amPm = "";
const hourify = () => {
    if (initialHour < 12) {
        amPm = "A.M.";
        return initialHour === 0 ? 12 : initialHour;
    } else {
        amPm = "P.M.";
        return initialHour === 12 ? initialHour : initialHour - 12;
    };
};
const initalHourName = hourify();
const initialMinutes = initialDateTime.getMinutes();
const initialSeconds = initialDateTime.getSeconds();
dateTimeElement.textContent = `It is ${initalHourName}:${initialMinutes}:${initialSeconds}${amPm} on ${initialMonthName} ${initialDay}, ${initialYear}`;
output.appendChild(dateTimeElement);
const updateButton = document.createElement("button");
updateButton.textContent = "update";
updateButton.addEventListener("click", () => {
let currentDateTime = new Date();
let currentYear = currentDateTime.getFullYear();
let currentMonth = currentDateTime.getMonth();
let currentDay = currentDateTime.getDate();
let currentMonthName = months[currentMonth];
let currentHour = currentDateTime.getHours();
let amPm = "";
const hourify = () => {
    if (currentHour < 12) {
        amPm = "A.M.";
        return currentHour === 0 ? 12 : currentHour;
    } else {
        amPm = "P.M.";
        return currentHour === 12 ? currentHour : currentHour - 12;
    };
};
const currentHourName = hourify();
const currentMinutes = currentDateTime.getMinutes();
const currentSeconds = currentDateTime.getSeconds();
dateTimeElement.textContent = `It is ${currentHourName}:${currentMinutes}:${currentSeconds}${amPm} on ${currentMonthName} ${currentDay}, ${currentYear}`;
});
output.appendChild(updateButton);
<file_sep>/daily-journal/src/scripts/data.js
// This module is responsible for all data fetches to the json database
const data = {
getJournalEntries () {
return fetch("http://localhost:8088/journalEntries", {
headers: {
"Cache-Control": "private"
}
})
.then(response => response.json());
},
postJournalEntry (entryToPost) {
return fetch("http://localhost:8088/journalEntries", {
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify(entryToPost)
})
}
};
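// Usage sketch (illustrative, never invoked): how the two methods above are typically
// consumed. The entry fields mirror what the form in domComponent.js collects; the
// literal values are hypothetical.
const exampleJournalCalls = () => {
    data.getJournalEntries()
        .then(entries => console.log(`Loaded ${entries.length} entries`));
    data.postJournalEntry({
        date: "2019-01-01",
        concept: "Fetch and POST",
        entry: "Practiced saving entries to json-server",
        mood: "Happy"
    }).then(response => response.json());
};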
export default data;<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/ReviewData.js
// This module fetches the review data from the API
const ReviewData = {
getReviewData () {
return fetch("http://localhost:8088/reviews")
.then(response => response.json())
}
};
export default ReviewData;
<file_sep>/browserify-practice/browserify-contact-list/src/scripts/main.js
// Import the ContactList component and the ContactForm component.
import contactList from "./contactList";
import contactForm from "./contactForm";
contactForm.makeForm();
contactList.contactify();
<file_sep>/browserify-practice/browserify-boilerplate/src/scripts/createGarage.js
/*
Author: your name here
Name: createGarage.js
Purpose: To store cars in garages
*/
/*
This array only exists within the scope of this module.
Therefore, no other module can access it. However,
the object returned by `createGarage` object you define below allows
code in other modules to indirectly access it by using
the methods.
*/
const garage = []
const createGarage = function() {
return {
store (car) {
garage.push(car)
},
retrieve(carToFind) {
// For more information about the Array.find method: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/find
return garage.find(car => car.make === carToFind.make && car.model === carToFind.model)
},
/*
The getInventory property is the only way for external code to
read the value of the garage variable. There is no setter
either. It is a read only property.
*/
getInventory() {
console.log(garage)
return garage
}
}
};
export default createGarage
<file_sep>/javascript/javascript-data/README.md
# javascript-data
<file_sep>/browserify-practice/browserify-boilerplate/README.md
# Car Garage application using Browserify
<file_sep>/daily-journal/src/scripts/domBuilder.js
// This module, when called, is responsible for appending the form in addition to rendering the journal entries and attaching them to the entry log section of the DOM
import domComponent from "./domComponent";
const domBuilder = {
renderForm () {
domComponent.makeFormComponent();
},
renderJournalEntries (entries) {
entries.forEach(entry => {
const sectionComponent = domComponent.makeJournalEntryComponent(entry);
$("#entryLog").append(sectionComponent);
});
}
};
export default domBuilder;<file_sep>/javascript/database-small-business/src/scripts/module1.js
/*
A small business wants to keep track of its employees and the computers that they use.
Each employee is assigned to a department, and they each get assigned a computer when they join the company.
1) Build arrays of objects that represent Employees, Departments, and Computers.
2) Assign every resource a unique id property.
3) Assign each employee to a department using a foreign key.
4) Assign each employee a computer using a foreign key.
*/
const businessData = {
employees: [
{
employeeId: 1,
name: "Dwight",
departmentId: 1,
computerId: 1,
},
{
employeeId: 2,
name: "Michael",
departmentId: 4,
computerId: 2,
},
{
employeeId: 3,
name: "Pam",
departmentId: 2,
computerId: 3
},
{
employeeId: 4,
name: "Jim",
departmentId: 1,
computerId: 4
},
{
employeeId: 5,
name: "Andy",
departmentId: 4,
computerId: 5
},
{
employeeId: 6,
name: "Angela",
departmentId: 3,
computerId: 6
},
{
employeeId: 7,
name: "Kevin",
departmentId: 3,
computerId: 7
}
],
departments: [
{
departmentId: 1,
name: "Sales"
},
{
departmentId: 2,
name: "Secretarial"
},
{
departmentId: 3,
name: "Accounting"
},
{
departmentId: 4,
name: "Management"
}
],
computers: [
{
computerId: 1,
type: "PC"
},
{
computerId: 2,
type: "PC"
},
{
computerId: 3,
type: "PC"
},
{
computerId: 4,
type: "PC"
},
{
computerId: 5,
type: "PC"
},
{
computerId: 6,
type: "PC"
},
{
computerId: 7,
type: "PC"
}
]
};
export default businessData;<file_sep>/browserify-practice/browserify-contact-list/src/scripts/contact.js
// Component that displays a person's name, phone number, and address.
const contact = {
contactBuilder(contactObject) {
let contactArticle = document.createElement("article");
let contactName = document.createElement("h3");
contactName.textContent = contactObject.name;
let contactNumber = document.createElement("p");
contactNumber.textContent = contactObject["phone-number"];
let contactAddress = document.createElement("p");
contactAddress.textContent = contactObject.address;
contactArticle.appendChild(contactName);
contactArticle.appendChild(contactNumber);
contactArticle.appendChild(contactAddress);
return contactArticle;
}
};
export default contact;<file_sep>/daily-journal/src/scripts/main.js
// This module calls on the functionality methods from journal.js
import journal from "./journal";
journal.journalify();
journal.handleRadioFilter();<file_sep>/react/kennel/src/components/location/LocationList.js
import React, { Component } from 'react'
import { Link } from 'react-router-dom'
import EmployeeCard from "../employee/EmployeeCard"
import "./Location.css"
export default class LocationList extends Component {
render() {
return (
<React.Fragment>
<section className='locations'>
{
this.props.locations.map(location =>
<div key={location.id} className="card">
<div className="card-title">
{location.name} at {location.address}
<Link className="nav-Link" to={`/locations/${location.id}`}>Details</Link>
</div>
<div className="employees--location">
{
this.props.employees
.filter(empl => empl.locationId === location.id)
.map(empl => <EmployeeCard {...this.props} key={empl.id} employee={empl}/>)
}
</div>
</div>
)
}
</section>
</React.Fragment>
);
}
}<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/ProductList.js
// This module creates the DOM component for the list of products and then appends them to the DOM when called
import Product from "./Product";
import ProductData from "./ProductData";
const ProductList = {
appendProductComponents () {
ProductData.getProductData()
.then(parsedResponse => {
let productsFragment = document.createDocumentFragment();
parsedResponse.forEach(productObject => {
let productComponent = Product.buildProduct(productObject);
productsFragment.appendChild(productComponent);
});
let output = document.querySelector(".output");
output.appendChild(productsFragment);
});
}
};
export default ProductList;<file_sep>/kennel-company/src/LocationList.js
import React, { Component } from "react";
import Location from "./Location";
export default class LocationList extends Component {
state = {
locations: [
{ id: 1, name: "<NAME>" },
{ id: 2, name: "<NAME>" }
]
};
// componentDidMount () {
// getAll("locations")
// .then(locations => this.setState({ locations: locations }))
// }
render() {
return (
<React.Fragment>
{this.state.locations.map(location => <Location key={location.id} location={location}/>)}
</React.Fragment>
);
}
}
<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/Nav.js
// This module builds the navigation bar and appends it to the DOM when called
const Nav = {
buildNavbar () {
const navContainer = document.createElement("nav");
navContainer.classList.add("navBar");
const navList = document.createElement("ul");
navContainer.appendChild(navList);
const companyItem = document.createElement("li");
companyItem.innerHTML = '<a class = "navItem companyItem" href="#">Betsy</a>'
const navLink1 = document.createElement("li");
navLink1.innerHTML = '<a class = "navItem" href="#">Categories</a>'
const navLink2 = document.createElement("li");
navLink2.innerHTML = '<a class = "navItem" href="#">Orders</a>'
const navLink3 = document.createElement("li");
navLink3.innerHTML = '<a class = "navItem" href="#">Logout</a>'
navList.appendChild(companyItem);
navList.appendChild(navLink1);
navList.appendChild(navLink2);
navList.appendChild(navLink3);
const output = document.querySelector(".output");
output.appendChild(navContainer);
}
};
export default Nav;<file_sep>/react/kennel/src/components/Kennel.js
import React, { Component } from "react"
import NavBar from "./nav/NavBar"
import ApplicationViews from "./ApplicationViews"
// import SearchManager from "./search/SearchManager"
import "./Kennel.css"
import "bootstrap/dist/css/bootstrap.min.css"
export default class Kennel extends Component {
state = {
    animals: [],
    employees: [],
    locations: [],
    searchResults: []
}
// searchAllData = (searchQuery) => {
// const newSearchResults = {};
// return SearchManager.searchAnimlas(searchQuery)
// .then(response => newSearchResults.animals = response)
// .then(() => SearchManager.searchEmployees(searchQuery)
// .then(response => )
// }
// }
render() {
return (
<React.Fragment>
<NavBar searchAllData={this.searchAllData} />
<ApplicationViews results={this.state.searchResults} />
</React.Fragment>
)
}
}<file_sep>/daily-journal/src/scripts/domComponent.js
// This module is responsible for creating the DOM components
import journal from "./journal";
const domComponent = {
makeFormComponent () {
const output = $("#output");
$("<h1>").text("Daily Journal").appendTo(output);
const formContainer = $("<form>").appendTo(output);
$("<fieldset>").append($("<label>", {for: "journalDate"}).text("Date of Entry"))
.append($("<input>", {type: "date", name: "journalDate", id: "journalDate"}))
.appendTo(formContainer);
$("<fieldset>").append($("<label>", {for: "conceptsCovered"}).text("Concepts Covered"))
.append($("<input>", {type: "text", name: "conceptsCovered", id: "conceptsCovered"}))
.appendTo(formContainer);
$("<fieldset>").append($("<label>", {for: "journalEntry"}).text("Journal Entry"))
.append($("<textarea>", {type: "text", name: "journalEntry", wrap: "soft", id: "journalEntry"}))
.appendTo(formContainer);
$("<fieldset>").append($("<label>", {for: "moodForDay"}).text("Mood for the Day"))
.append($("<select>", {name: "moodForDay", id: "moodForDay"})
.append($("<option>", {value: "Happy"}).text("Happy"))
.append($("<option>", {value: "Sad"}).text("Sad"))
.append($("<option>", {value: "Ecstatic"}).text("Ecstatic"))
.append($("<option>", {value: "Miserable"}).text("Miserable"))
)
.appendTo(formContainer);
$("<button>", {type: "button", id: "saveButton"}).text("Record Journal Entry").click( () => journal.handleSaveButton)
.appendTo(formContainer);
$("<fieldset>").append($("<legend>").text("Filter Journal Entries by Mood"))
.append($("<div>", {id: "radio"})
.append($("<div>")
.append($("<input>", {type: "radio", name: "filter", value: "Happy"}))
.append($("<label>").text("Happy"))
)
.append($("<div>")
.append($("<input>", {type: "radio", name: "filter", value: "Sad"}))
.append($("<label>").text("Sad"))
)
.append($("<div>")
.append($("<input>", {type: "radio", name: "filter", value: "Ecstatic"}))
.append($("<label>").text("Ecstatic"))
)
.append($("<div>")
.append($("<input>", {type: "radio", name: "filter", value: "Miserable"}))
.append($("<label>").text("Miserable"))
)
)
.appendTo(formContainer);
},
makeJournalEntryComponent (entry) {
const div = $("<div>").append($("<h2>").text(entry.concept))
.append($("<p>").text(entry.entry))
.append($("<p>").text(`Date of Entry: ${entry.date}`))
.append($("<p>").text(`Mood for the Day: ${entry.mood}`));
return div;
}
};
export default domComponent;
<file_sep>/browserify-practice/browserify-contact-list/src/scripts/contactList.js
// Component that displays all contacts. It should import the Contact component and the ContactCollection component.
import contactCollection from "./contactCollection";
import contact from "./contact";
const contactList = {
contactify() {
contactCollection.getAllContacts()
.then(parsedResponse => {
let contactFragment = document.createDocumentFragment();
parsedResponse.forEach(contactItem => {
let contactToAppend = contact.contactBuilder(contactItem);
contactFragment.appendChild(contactToAppend);
});
const outputArticle = document.querySelector(".output");
outputArticle.appendChild(contactFragment);
});
}
};
export default contactList;<file_sep>/react/kennel/src/components/employee/EmployeeList.js
import React, { Component } from 'react'
import { Link } from "react-router-dom"
import employeeIcon from "./employeeicon.png"
import AnimalCard from "../animal/AnimalCard"
import "./Employee.css"
export default class EmployeeList extends Component {
render() {
return (
<React.Fragment>
<div className="employeeButton">
<button type="button"
className="btn btn-success"
onClick={() => {
this.props.history.push("/employees/new")}
}>
Hire Employee
</button>
</div>
<section className="employees">
{
this.props.employees.map(employee =>
<div key={employee.id} className="card">
<div className="card-body">
<h5 className ="card-title">
<img src={employeeIcon} className="icon--employee" alt="employee icon" />
{employee.name}
<Link className="nav-link" to={`/employees/${employee.id}`}>Details</Link>
<button className="card-link" onClick={() => this.props.deleteEmployee(employee.id)}>Fire
</button>
</h5>
<h6 className="card-subtitle mb-2 text-muted">Caretaker For</h6>
<div className="animals--caretaker">
{
this.props.animals
.filter(anml => anml.employeeId === employee.id)
.map(anml => <AnimalCard key={anml.id} animal={anml} {...this.props} />)
}
</div>
</div>
</div>
)
}
</section>
</React.Fragment>
);
}
}
<file_sep>/kennel-company/src/EditAnimal.js
import React, { Component } from "react";
import ApiManager from "./ApiManager"
export default class EditAnimal extends Component {
// Set initial state
state = {
name: this.props.animal.name,
breed: this.props.animal.breed
};
// Update state whenever an input field is edited
handleFieldChange = evt => {
const stateToChange = {};
stateToChange[evt.target.id] = evt.target.value;
this.setState(stateToChange);
};
handleUpdate = e => {
e.preventDefault();
const updatedAnimal = {name: this.state.name, breed: this.state.breed}
ApiManager.updateItem("animals", this.props.animal.id, updatedAnimal)
.then(() => {
this.props.history.push("/animals");
})
};
render() {
return (
<form onSubmit={this.handleUpdate}>
<h1 className="h3 mb-3 font-weight-normal">Edit Animal</h1>
<label htmlFor="inputName">Animal Name</label>
<input
value={this.state.name}
onChange={this.handleFieldChange}
type="text"
id="name"
placeholder="Name"
required=""
autoFocus=""
/>
<label htmlFor="inputBreed">Animal Breed</label>
<input
value={this.state.breed}
onChange={this.handleFieldChange}
type="text"
id="breed"
placeholder="Breed"
required=""
/>
<button type="submit">Update</button>
</form>
);
}
}
<file_sep>/html/semantic-html/index.js
var thing = "i'm a lasagna hog".split("").reverse().join("");
document.write("yo");
import React, { Component } from 'react'
export default class SearchResults extends Component {
render () {
return (
<React.Fragment>
<section className="search--results">
{
this.props.results.map(result =>
result.map(re => (
<p key={re.id}>{re.name}</p>
))
)
}
</section>
</React.Fragment>
)
}
}<file_sep>/javascript/array-methods/agent.js
outEl.innerHTML += "<h1>Purchasing Agents</h1>";
/*
Using map(), you extract the purchasing agent object
from each business and store it in a new array
*/
// const agents = businesses.map(business => {
// return business.purchasingAgent
// })
// console.table(agents)
// agents.forEach(agent => {
// outEl.innerHTML += `<h2>${agent.nameFirst} ${agent.nameLast}</h2>`;
// outEl.innerHTML += "<hr/>";
// });
// Lightning Exercise: Instead of just returning the purchasing agent object, return a new object that has the full name of the purchasing agent, the company name, and the phone number.
// The data structure is shown below. Use that new data structure to display the agent with their company and phone number
const newAgent = businesses.map(business => {
let newObject = {
fullname: `${business.purchasingAgent.nameFirst} ${business.purchasingAgent.nameLast}`,
company: business.companyName,
phoneNumber: business.phoneWork
};
return newObject;
});
console.table(newAgent);
newAgent.forEach(agent => {
outEl.innerHTML += `<h2>${agent.fullname}</h2>
<section>${agent.company}</section>
<section>${agent.phoneNumber}</section>
<hr/>`;
});<file_sep>/kennel-company/src/ApplicationViews.js
import { Route } from "react-router-dom";
import React, { Component } from "react";
import AnimalList from "./AnimalList";
import LocationList from "./LocationList";
import EmployeeList from "./EmployeeList";
import Animal from "./Animal";
import Location from "./Location";
import Employee from "./Employee";
import Login from "./Login"
import EditAnimal from "./EditAnimal"
import Auth from "./Auth/Auth"
const auth = new Auth()
const handleAuthentication = ({location}) => {
if (/access_token|id_token|error/.test(location.hash)) {
auth.handleAuthentication();
}
}
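// Note: Auth (./Auth/Auth, not included in this dump) is assumed to wrap an
// auth0-style client: handleAuthentication() parses the token from the URL hash
// and isAuthenticated() reports whether a stored session is still valid.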
const { isAuthenticated } = auth
export default class ApplicationViews extends Component {
// Check if credentials are in local storage
render() {
return (
<React.Fragment>
<Route exact path="/" render={props => {
if (isAuthenticated()) {
return <LocationList />
} else {
return <Login auth={auth} {...props}/>
}
}} />
<Route path="/locations/:locationId" render={(props) => {
return <Location location={props.location.state.location} />
}} />
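      {/* NOTE: this component defines no state, so this.state.animals below is
          undefined at runtime; AnimalList likely needs to fetch its own data
          (as EmployeeList does) or receive animals from a stateful parent. */}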
<Route exact path="/animals" render={(props) => {
return <AnimalList animals={this.state.animals}/>
}} />
<Route exact path="/animals/:animalId" render={(props) => {
return <Animal animal={props.location.state.animal} {...props}/>
}} />
<Route path="/animals/:animalId/edit" render={(props) => {
return <EditAnimal animal={props.location.state.animal} {...props}/>
}} />
<Route exact path="/employees" component={EmployeeList} />
<Route path="/employees/:employeeId" render={(props) => {
return <Employee employee={props.location.state.employee}>
{props.location.state.employee.name}
</Employee>
}} />
<Route path="/login" component={Login} />
<Route path="/callback" render={(props) => {
handleAuthentication(props)
return <LocationList {...props} />
}} />
</React.Fragment>
);
}
}
<file_sep>/javascript/javascript-components/README.md
# javascript-components
<file_sep>/javascript/events-wonder/powers.js
// document.querySelector("#activate-flight").addEventListener("click", function flightHandlerFunction() {
// document.querySelector("#flight").classList.remove("disabled");
// document.querySelector("#flight").classList.add("enabled");
// });
// document.querySelector("#activate-mindreading").addEventListener("click", function mindreadingHandlerFunction() {
// document.querySelector("#mindreading").classList.remove("disabled");
// document.querySelector("#mindreading").classList.add("enabled");
// });
// document.querySelector("#activate-xray").addEventListener("click", function xrayHandlerFunction() {
// document.querySelector("#xray").classList.remove("disabled");
// document.querySelector("#xray").classList.add("enabled");
// });
document.querySelector("#activate-all").addEventListener("click", function activateAllFunction() {
const items = document.querySelectorAll(".power");
items.forEach(function(item) {
item.classList.remove("disabled");
item.classList.add("enabled");
})
});
document.querySelector("#deactivate-all").addEventListener("click", function deactivateAllFunction() {
const items = document.querySelectorAll(".power");
items.forEach(function(item) {
item.classList.add("disabled");
item.classList.remove("enabled");
})
});
// CHALLENGE 1
const makeItHappen = (event) => {
    let power = event.target.id.split("-")[1];
document.querySelector(`#${power}`).classList.remove("disabled");
document.querySelector(`#${power}`).classList.add("enabled");
}
document.querySelector("#activate-flight").addEventListener("click", makeItHappen);
document.querySelector("#activate-mindreading").addEventListener("click", makeItHappen);
document.querySelector("#activate-xray").addEventListener("click", makeItHappen);
<file_sep>/browserify-practice/browserify-contact-list/src/scripts/contactForm.js
// Component that, when filled out and a submit button is pressed, adds a new contact to storage. It should import the ContactCollection component.
import contactCollection from "./contactCollection";
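// contactCollection (not included in this dump) is assumed to expose
// saveAContact(contact), which persists the contact object built below.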
const contactForm = {
createDomElement({elementType, content = null, attributes = {} }) {
const element = document.createElement(elementType);
element.textContent = content;
for (let key in attributes) {
element.setAttribute(key, attributes[key]);
};
return element;
},
makeForm() {
const outputArticle = document.querySelector(".output");
const formToAppend = document.createElement("form");
let nameInput = this.createDomElement({
elementType: "input",
attributes: {
type: "text",
class: "nameInput"
}
});
let numberInput = this.createDomElement({
elementType: "input",
attributes: {
type: "text",
class: "numberInput"
}
});
let addressInput = this.createDomElement({
elementType: "input",
attributes: {
type: "text",
class: "addressInput"
}
});
let saveButton = this.createDomElement({
elementType: "button",
content: "Save",
attributes: {
class: "saveButton"
}
});
saveButton.addEventListener("click", this.saveContactToJson);
formToAppend.appendChild(nameInput);
formToAppend.appendChild(numberInput);
formToAppend.appendChild(addressInput);
formToAppend.appendChild(saveButton);
outputArticle.appendChild(formToAppend);
},
saveContactToJson() {
let newContactName = document.querySelector(".nameInput");
let newContactNumber = document.querySelector(".numberInput");
let newContactAddress = document.querySelector(".addressInput");
let newContact = {
"name": newContactName.value,
"phone-number": newContactNumber.value,
"address": newContactAddress.value
};
contactCollection.saveAContact(newContact);
}
};
export default contactForm;<file_sep>/kennel-company/src/ApiManager.js
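// ApiManager centralizes fetch calls to the local API (a json-server instance is
// assumed to be running on http://localhost:5002) for reading, deleting and updating collections.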
const ApiManager = Object.create({}, {
getAll: {
value: (collectionName) => {
return fetch(`http://localhost:5002/${collectionName}`)
.then(e => e.json())
}
},
deleteItem: {
value: (collectionName, itemId) => {
return fetch(`http://localhost:5002/${collectionName}/${itemId}`, {
method: "DELETE"
})
}
},
updateItem: {
value: (collectionName, itemId, dataObject) => {
return fetch(`http://localhost:5002/${collectionName}/${itemId}`, {
method: "PUT",
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(dataObject)
});
}
}
})
export default ApiManager
<file_sep>/ternary-traveler/src/scripts/main.js
// The purpose of this module is to initialize the app: build the input form and render the saved point-of-interest cards.
import domBuilder from "./domBuilder";
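// domBuilder (not shown in this dump) is assumed to expose createInputForm() and
// appendPointCards(), which build the entry form and render the saved point cards.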
domBuilder.createInputForm();
domBuilder.appendPointCards();<file_sep>/react/kennel/src/components/search/SearchManager.js
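// SearchManager queries the animals, employees and owners collections in parallel;
// searchAll(query) resolves with an array of three result arrays, one per collection.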
const remoteURL = "http://localhost:5002"
export default {
searchAnimals(query) {
return fetch(`${remoteURL}/animals?q=${query}`)
.then(e => e.json())
},
searchEmployees(query) {
return fetch(`${remoteURL}/employees?q=${query}`)
.then(e => e.json())
},
searchOwners(query) {
return fetch(`${remoteURL}/owners?q=${query}`)
.then(e => e.json())
},
searchAll(query) {
return Promise.all([this.searchAnimals(query), this.searchEmployees(query), this.searchOwners(query)])
}
}<file_sep>/javascript/array-methods-chaining/chaining.js
/*
Requirements:
Using one single line of JavaScript code, complete the following tasks on the array of integers below.
Sort the numbers in descending order (10, 9, 8, 7, etc).
Remove any integers greater than 19.
Multiply each remaining number by 1.5 and then subtract 1.
Then output (either in the DOM or the console) the sum of all the resulting numbers.
*/
const integers = [13, 25, 6, 3, 11, 2, 18, 7, 21, 1, 29, 20, 12, 8];
const newArray = integers.sort( (a, b) => {
return b - a;
}).filter(integer =>
    integer <= 19
).map(integer => {
    return (integer * 1.5) - 1;
}).reduce((total, next) => total += next,
0
);
console.log(newArray);
<file_sep>/javascript/events-mirror/main.js
// Create an input field in your DOM. Give it an id of message.
// Create two article elements with unique id values. Use Flexbox to display them in a row, each taking 50% of the width of the browser.
// Give each article a different border color.
// Write an event listener that listens for the keyup event on the input field.
// The event handler function should update the textContent property of both sections.
const fragment = document.createDocumentFragment();
const input = document.createElement("input");
input.setAttribute("id", "message");
input.setAttribute("type", "text");
fragment.appendChild(input);
const div = document.createElement("div");
div.className = "parent-container";
fragment.appendChild(div);
document.querySelector("body").appendChild(fragment);
const article1 = document.createElement("article");
article1.setAttribute("id", "article1");
article1.textContent = "First Article";
div.appendChild(article1);
const article2 = document.createElement("article");
article2.setAttribute("id", "article2");
article2.textContent = "Second Article";
div.appendChild(article2);
input.addEventListener("keyup", function(event) {
article1.textContent = event.target.value;
article2.textContent = event.target.value;
})
<file_sep>/ternary-traveler/src/scripts/eventListeners.js
import data from "./data";
import domBuilder from "./domBuilder";
// The purpose of this module is to create all the event listeners functions to be used in the app
const eventListeners = {
handleSaveButton() {
const nameInputted = $("#pointName").val();
const descriptionInputted = $("#pointDescription").val();
const costInputted = $("#pointCost").val();
const locationSelected = $("#pointLocation").val();
data.fetchRequest({
dataSet: "places",
fetchType: "GET",
})
.then(locations => {
locations.forEach(location => {
if (location.name === locationSelected) {
data.fetchRequest({
dataSet: "interests",
fetchType: "POST",
databaseObject: {
placeId: location.id,
name: nameInputted,
description: descriptionInputted,
cost: costInputted,
review: "",
}
})
.then( () => {
domBuilder.appendPointCards();
});
};
});
});
},
handleDeleteButton() {
const currentContainerId = event.target.parentNode.id.split("--")[1];
const confirmationModal = domBuilder.createConfirmationPopup(currentContainerId);
$("#output").append(confirmationModal);
},
handleConfirmButton() {
const idToDelete = event.target.parentNode.id.split("--")[1];
const currentContainerId = event.target.parentNode.id;
data.fetchRequest({
dataSet: "interests",
fetchType: "DELETE",
specificId: idToDelete
})
.then( () => {
domBuilder.appendPointCards();
$(`#${currentContainerId}`).remove();
});
},
handleDenyButton() {
const currentContainerId = event.target.parentNode.id;
$(`#${currentContainerId}`).remove();
},
handleEditButton() {
const idToEdit = event.target.parentNode.id.split("--")[1];
data.fetchRequest({
dataSet: "interests",
fetchType: "GET",
specificId: idToEdit
})
.then(interest => {
domBuilder.createEditForm(idToEdit, interest);
});
},
handleSubmitButton() {
const currentEditId = event.target.parentNode.id.split("--")[1];
data.fetchRequest({
dataSet: "interests",
fetchType: "GET",
specificId: currentEditId
})
.then(interest => {
data.fetchRequest({
dataSet: "interests",
fetchType: "PUT",
specificId: currentEditId,
databaseObject: {
id: interest.id,
placeid: interest.placeId,
name: interest.name,
description: interest.description,
cost: $(`#pointCostEdit--${currentEditId}`).val(),
review: $(`#pointReviewEdit--${currentEditId}`).val()
}
})
.then( () => {
domBuilder.appendPointCards();
});
});
}
};
export default eventListeners;<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/ProductData.js
// This module fetches the product data from the API
const ProductData = {
getProductData () {
return fetch("http://localhost:8088/products")
.then(response => response.json());
}
};
export default ProductData;<file_sep>/browserify-practice/browserify-product-ordering/src/scripts/ReviewList.js
import ReviewData from "./ReviewData";
import Review from "./Review";
const ReviewList = {
fillReviewComponents (productObject) {
ReviewData.getReviewData()
.then(parsedResponse => {
parsedResponse.forEach(review => {
if (review.productId === productObject.id) {
Review.createReviewComponent(review);
}
});
});
}
};
export default ReviewList;<file_sep>/kennel-company/src/EmployeeList.js
import React, { Component } from "react"
import Employee from "./Employee";
export default class EmployeeList extends Component {
state = {
employees: []
}
componentDidMount () {
fetch("http://localhost:5002/employees")
.then(e => e.json())
.then(employees => this.setState({ employees: employees }))
}
render() {
return (
<React.Fragment>
{
this.state.employees.map(employee =>
<Employee key={employee.id} employee={employee} foo="hello">
{employee.name}
</Employee>
)
}
</React.Fragment>
)
}
}
<file_sep>/react/student-exercise/src/modules/APIManager.js
const remoteURL = "http://localhost:5002"
export default {
getAll() {
return fetch(`${remoteURL}/students`).then(data => data.json())
}
}
[dataset row metadata — repo: colebryant/bootcamp-frontend; languages: Markdown, JavaScript; 54 files; refs/heads/master]
<repo_name>conens021/javafxEmployeesApp<file_sep>/README.md
# javafxEmployeesApp
This is a demo project for CRUD operations in JavaFX.
<file_sep>/Company/src/main/Company.java
package main;
import javafx.application.Application;
import javafx.event.EventHandler;
import javafx.fxml.FXMLLoader;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.input.MouseEvent;
import javafx.stage.Stage;
import javafx.stage.StageStyle;
public class Company extends Application {
double dragOffsetX;
double dragOffsetY;
@Override
public void start(Stage stage) throws Exception {
Parent root = FXMLLoader.load(getClass().getResource("/view/FXMLDocument.fxml"));
Scene scene = new Scene(root);
stage.initStyle(StageStyle.UNDECORATED);
scene.setOnMousePressed(new EventHandler<MouseEvent>() {
@Override
public void handle(MouseEvent event) {
dragOffsetX = event.getScreenX() - stage.getX();
dragOffsetY = event.getScreenY() - stage.getY();
}
});
scene.setOnMouseDragged(new EventHandler<MouseEvent>() {
@Override
public void handle(MouseEvent event) {
stage.setX(event.getScreenX() - dragOffsetX);
stage.setY(event.getScreenY() - dragOffsetY);
}
});
stage.setScene(scene);
stage.show();
}
public static void main(String[] args) {
launch(args);
}
}
<file_sep>/Company/src/model/Employees.java
package model;
import javafx.beans.property.DoubleProperty;
import javafx.beans.property.IntegerProperty;
import javafx.beans.property.SimpleDoubleProperty;
import javafx.beans.property.SimpleIntegerProperty;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;
public class Employees {
StringProperty name = new SimpleStringProperty(this, "name", "");
StringProperty address = new SimpleStringProperty(this, "address", "");
IntegerProperty id = new SimpleIntegerProperty(this, "id", 0);
IntegerProperty age = new SimpleIntegerProperty(this, "age", 0);
DoubleProperty income = new SimpleDoubleProperty(this, "income", 0.0);
//CONSTRUCTORS
public Employees() {
this.name.set("");
this.address.set("");
this.id.set(0);
this.age.set(0);
this.income.set(0.0);
}
public Employees(int id, String name, String address, int age, double income) {
this.id.set(id);
this.name.set(name);
this.address.set(address);
this.age.set(age);
this.income.set(income);
}
public Employees(String name, String address, int age, double income) {
this.name.set(name);
this.address.set(address);
this.age.set(age);
this.income.set(income);
}
//SETTER METHODS
public void setName(String name) {
this.name.set(name);
}
public void setAddress(String address) {
this.address.set(address);
}
public void setId(int id) {
this.id.set(id);
}
public void setAge(int age) {
this.age.set(age);
}
public void setIncome(double income) {
this.income.set(income);
}
//GETTER METHODS
public String getName() {
return this.name.get();
}
public String getAddress() {
return this.address.get();
}
public int getId() {
return this.id.get();
}
public int getAge() {
return this.age.get();
}
public double getIncome() {
return this.income.get();
}
//Property
public StringProperty nameProperty() {
return this.name;
}
public StringProperty addressProperty() {
return this.address;
}
public IntegerProperty idProperty() {
return this.id;
}
public IntegerProperty ageProperty() {
return this.age;
}
public DoubleProperty incomeProperty() {
return this.income;
}
public boolean inputIsValid() {
boolean validate = true;
if (name.get().isEmpty() || name.get().equals("")) {
validate = false;
}
if (address.get().isEmpty() || address.get().equals("")) {
validate = false;
}
if (age.get() <= 0) {
validate = false;
}
if (income.get() <= 0) {
validate = false;
}
return validate;
}
public String saveEmployee() {
if (inputIsValid()) {
return "Employee:\n" + "name:" + this.getName() + "\naddress:" + this.getAddress() + "\nage:" + this.getAge() + "\nincome:" + this.getIncome() + "\nHas been created!";
} else {
return "Please fill all fields!";
}
}
@Override
public String toString() {
return "name:" + this.getName() + "\naddress:" + this.getAddress() + "\nage:" + this.getAge() + "\nincome:" + this.getIncome();
}
}
<file_sep>/Company/src/controller/FXMLDocumentController.java
package controller;
import com.jfoenix.controls.JFXButton;
import com.jfoenix.controls.JFXRadioButton;
import com.jfoenix.controls.JFXTextField;
import java.sql.Statement;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import javafx.application.Platform;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.event.ActionEvent;
import javafx.fxml.FXML;
import javafx.scene.control.Alert;
import javafx.scene.control.ButtonType;
import javafx.scene.control.Label;
import javafx.scene.control.TableView;
import javafx.scene.control.ToggleGroup;
import javafx.stage.Stage;
import model.Employees;
public class FXMLDocumentController {
@FXML
private TableView table;
@FXML
private JFXTextField nameTxtInput, addrsTxtInput, ageTxtField, incomeTxtInput, findByTxtInput;
@FXML
private JFXTextField ageFromTxt, ageBellowTxt, fromAndAge, bellowAndAge,idFromTxt,idBellowTxt,fromAndId,bellowAndId;
@FXML
private Label ageError, incomeError;
@FXML
private ToggleGroup findByGroup;
@FXML
private JFXRadioButton nameToggle, ageToggle, idToggle, addressToggle;
@FXML
private JFXButton fromAge, bellowAge, fromAndBellowAge,fromId,bellowId,fromAndBellowId;
private Stage stage;
public static final String USERNAME = "root";
public static final String PASSWORD = "<PASSWORD>";
private static final String CONNECTION = "jdbc:mysql://localhost:3306/company";
Connection conn;
String query;
StringBuilder value;
int age, age2;
Employees employee;
ObservableList<Employees> employeesList = FXCollections.<Employees>observableArrayList();
public void initialize() throws SQLException {
try {
conn = DriverManager.getConnection(CONNECTION, USERNAME, PASSWORD);
} catch (SQLException e) {
System.out.println(e.getMessage());
}
table.getSelectionModel().selectedItemProperty().addListener((obs, oldSelection, newSelection) -> {
if (newSelection != null) {
Employees empl = (Employees) table.getSelectionModel().getSelectedItem();
nameTxtInput.setText(empl.getName());
addrsTxtInput.setText(empl.getAddress());
ageTxtField.setText(String.valueOf(empl.getAge()));
incomeTxtInput.setText(String.valueOf(empl.getIncome()));
}
});
nameToggle.setUserData("Name");
idToggle.setUserData("ID");
addressToggle.setUserData("Address");
ageToggle.setUserData("Age");
}
//METHODS FOR ADDING, UPDATING AND DELETING EMPLOYEES
//CREATE OBJECTS AND STORE THEM IN THE DATABASE
@FXML
private void insertEmp() throws SQLException {
PreparedStatement st = conn.prepareStatement("insert into employees(name,address,age,income) values (?,?,?,?)");
//CREATE THE OBJECT AND ADD IT TO THE TABLE
employee = new Employees();
employee.setName(nameTxtInput.getText());
employee.setAddress(addrsTxtInput.getText());
try {
employee.setAge(Integer.parseInt(ageTxtField.getText()));
} catch (NumberFormatException e) {
ageError.setVisible(true);
}
try {
employee.setIncome(Double.parseDouble(incomeTxtInput.getText()));
} catch (NumberFormatException e) {
incomeError.setVisible(true);
}
if (employee.inputIsValid()) {
//INSERT THE EMPLOYEE INTO THE DATABASE
st.setString(1, employee.getName());
st.setString(2, employee.getAddress());
st.setInt(3, employee.getAge());
st.setDouble(4, employee.getIncome());
st.execute();
//SET THE EMPLOYEE'S ID (read back the auto-generated key)
ResultSet rs = conn.createStatement().executeQuery("select last_insert_id() as id");
rs.next();
employee.setId(rs.getInt("id"));
//ADD THE EMPLOYEE TO THE LIST AND THE TABLE
employeesList.add(new Employees(employee.getId(), employee.getName(), employee.getAddress(), employee.getAge(), employee.getIncome()));
table.setItems(employeesList);
Alert alert;
alert = new Alert(Alert.AlertType.INFORMATION, employee.saveEmployee(), ButtonType.OK);
alert.setHeaderText("Employee created successfuly!");
alert.show();
ageError.setVisible(false);
incomeError.setVisible(false);
} else {
Alert alert;
alert = new Alert(Alert.AlertType.ERROR, employee.saveEmployee(), ButtonType.OK);
alert.setHeaderText("Error");
alert.show();
}
}
//UPDATE AN EMPLOYEE
@FXML
private void updateEmployee() throws SQLException {
//GET THE SELECTED ITEM AND CREATE A NEW OBJECT
Employees empl = (Employees) table.getSelectionModel().getSelectedItem();
Employees updtEmpl = new Employees(empl.getId(), empl.getName(), empl.getAddress(), empl.getAge(), empl.getIncome());
//UPDATE THE VALUES
updtEmpl.setName(nameTxtInput.getText());
updtEmpl.setAddress(addrsTxtInput.getText());
try {
updtEmpl.setAge(Integer.parseInt(ageTxtField.getText()));
} catch (NumberFormatException e) {
ageError.setVisible(true);
}
try {
updtEmpl.setIncome(Double.parseDouble(incomeTxtInput.getText()));
} catch (NumberFormatException e) {
incomeError.setVisible(true);
}
//VALIDATE THE INPUT, UPDATE THE DATABASE AND REPLACE THE OBJECT IN THE TABLE
if (updtEmpl.inputIsValid()) {
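//NOTE: building SQL by concatenating user input (below) is open to SQL injection;
//a PreparedStatement with bound parameters, as used in insertEmp, would be safer.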
Statement st = conn.createStatement();
st.execute("update employees set name='" + updtEmpl.getName() + "',address='" + updtEmpl.getAddress() + "',age='" + updtEmpl.getAge() + "',income='"
+ updtEmpl.getIncome() + "' where id=" + updtEmpl.getId());
table.getItems().remove(empl);
table.getItems().add(updtEmpl);
Alert alert = new Alert(Alert.AlertType.INFORMATION, "Employee:\n" + empl + "has been updated.", ButtonType.OK);
alert.setHeaderText("Employee updated successfuly");
alert.show();
ageError.setVisible(false);
incomeError.setVisible(false);
} else {
Alert alert = new Alert(Alert.AlertType.ERROR, "Please fill all fields!", ButtonType.OK);
alert.setHeaderText("Someting went wrong");
alert.show();
}
}
//DELETE AN EMPLOYEE
@FXML
private void deleteEmployee() {
Employees empl = (Employees) table.getSelectionModel().getSelectedItem();
Statement st;
try {
st = conn.createStatement();
st.execute("delete from employees where id = " + empl.getId());
table.getItems().remove(empl);
Alert alert = new Alert(Alert.AlertType.INFORMATION, "Employee\n" + empl + "\ndeleted successfuly!", ButtonType.OK);
alert.setHeaderText("Employee deleted");
alert.show();
} catch (SQLException ex) {
Alert alert = new Alert(Alert.AlertType.ERROR, ex.getMessage(), ButtonType.CLOSE);
alert.setHeaderText("Error");
alert.show();
}
}
@FXML
private void clearInputs() {
nameTxtInput.clear();
addrsTxtInput.clear();
ageTxtField.clear();
incomeTxtInput.clear();
}
//SEARCH METHODS
//list all employees
@FXML
private void listAll() {
employee = null;
//Clear the table
table.getItems().clear();
//List the data from the database and add it to the table
try {
Statement st = conn.createStatement();
st.execute("select * from employees");
ResultSet rs = st.getResultSet();
ObservableList<Employees> allEmp = FXCollections.observableArrayList();
while (rs.next()) {
employee = new Employees(rs.getInt("id"), rs.getString("name"), rs.getString("address"), rs.getInt("age"), rs.getDouble("income"));
allEmp.add(employee);
}
table.getItems().addAll(allEmp);
} catch (SQLException ex) {
Alert alert = new Alert(Alert.AlertType.ERROR, ex.getMessage(), ButtonType.CLOSE);
alert.setHeaderText("Something went wrong");
alert.show();
}
}
//search by column
@FXML
private void findBy() {
query = "";
String toggle;
if (findByGroup.getSelectedToggle() != null) {
toggle = findByGroup.getSelectedToggle().getUserData().toString();
switch (toggle) {
case "Name":
query = "name";
break;
case "Address":
query = "address";
break;
case "Age":
query = "age";
break;
case "ID":
query = "id";
break;
}
value = new StringBuilder();
value.append("%");
String str = findByTxtInput.getText();
value.append(str);
value.append("%");
try {
Statement st = conn.createStatement();
st.execute("select * from employees where " + query + " LIKE '" + value + "'");
ResultSet rs = st.getResultSet();
ObservableList<Employees> findEmpl = FXCollections.observableArrayList();
while (rs.next()) {
employee = new Employees(rs.getInt("id"), rs.getString("name"), rs.getString("address"), rs.getInt("age"), rs.getDouble("income"));
findEmpl.add(employee);
}
table.getItems().clear();
table.setItems(findEmpl);
} catch (SQLException ex) {
System.out.println(ex.getMessage());
}
}
}
//search by age
@FXML
private void findByAge(ActionEvent event) throws SQLException {
value = new StringBuilder();
query = "";
if (event.getSource() == fromAge) {
query = ageFromTxt.getText();
value.append("select * from employees where age > ");
value.append(query);
}
if (event.getSource() == bellowAge) {
query = ageBellowTxt.getText();
value.append("select * from employees where age < ");
value.append(query);
}
if (event.getSource() == fromAndBellowAge) {
query = fromAndAge.getText();
value.append("select * from employees where age BETWEEN ").append(query).append(" AND ").append(bellowAndAge.getText());
System.out.println(value.toString());
}
try {
Statement st = conn.createStatement();
st.execute(value.toString());
ResultSet rs = st.getResultSet();
ObservableList<Employees> findEmpl = FXCollections.observableArrayList();
while (rs.next()) {
employee = new Employees(rs.getInt("id"), rs.getString("name"), rs.getString("address"), rs.getInt("age"), rs.getDouble("income"));
findEmpl.add(employee);
}
table.getItems().clear();
table.setItems(findEmpl);
} catch (SQLException ex) {
System.out.println(ex.getMessage());
}
}
@FXML
private void findByID(ActionEvent event) throws SQLException {
value = new StringBuilder();
query = "";
if (event.getSource() == fromId) {
query = idFromTxt.getText();
value.append("select * from employees where id > ");
value.append(query);
}
if (event.getSource() == bellowId) {
query = idBellowTxt.getText();
value.append("select * from employees where id < ");
value.append(query);
}
if (event.getSource() == fromAndBellowId) {
query = fromAndId.getText();
value.append("select * from employees where id BETWEEN ").append(query).append(" AND ").append(bellowAndId.getText());
System.out.println(value.toString());
}
try {
Statement st = conn.createStatement();
st.execute(value.toString());
ResultSet rs = st.getResultSet();
ObservableList<Employees> findEmpl = FXCollections.observableArrayList();
while (rs.next()) {
employee = new Employees(rs.getInt("id"), rs.getString("name"), rs.getString("address"), rs.getInt("age"), rs.getDouble("income"));
findEmpl.add(employee);
}
table.getItems().clear();
table.setItems(findEmpl);
} catch (SQLException ex) {
System.out.println(ex.getMessage());
}
}
//<NAME>
@FXML
private void closeWindow() throws SQLException {
conn.close();
Platform.exit();
}
@FXML
private void minimizeWindow() throws IOException {
stage = (Stage) ageError.getScene().getWindow();
stage.setIconified(true);
}
}
<file_sep>/db/company_employees.sql
-- MySQL dump 10.13 Distrib 8.0.11, for Win64 (x86_64)
--
-- Host: localhost Database: company
-- ------------------------------------------------------
-- Server version 8.0.11
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
SET NAMES utf8 ;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `employees`
--
DROP TABLE IF EXISTS `employees`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
SET character_set_client = utf8mb4 ;
CREATE TABLE `employees` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(45) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL,
`age` int(11) NOT NULL,
`address` varchar(80) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL,
`income` double NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=40 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `employees`
--
LOCK TABLES `employees` WRITE;
/*!40000 ALTER TABLE `employees` DISABLE KEYS */;
INSERT INTO `employees` VALUES (15,'<NAME>',23,'Bihacka 36',23132.32),(16,'<NAME>',20,'Bihacka 36',23112.21),(17,'<NAME>',52,'Negro Royal 308',321321312.21),(18,'Nemanja',23,'Rackovic',21213.21),(21,'<NAME>',51,'Gocina 32',1111.11),(23,'<NAME>',51,'Hoster 21',212112.21),(24,'<NAME>',41,'SDdsa as',211212.21),(27,'<NAME>',23,'Biohac',122112.21),(28,'<NAME>',23,'Bihaฤka',122112.21),(29,'<NAME>',12,'Vo<NAME>',211221.1),(31,'<NAME>',49,'Bihacka 36',12312),(36,'ฤuirฤiฤ',11,'ล okuฤiฤeva',212112),(37,'<NAME>',23,'Bihaฤka 36',25121.21),(38,'<NAME>',42,'Kolumska 42',222321),(39,'Nemanja',21,'Kossa 12',32);
/*!40000 ALTER TABLE `employees` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2018-05-19 15:01:04
[dataset row metadata — repo: conens021/javafxEmployeesApp; languages: Markdown, Java, SQL; 5 files; refs/heads/master]
<file_sep>import React, { useEffect } from "react";
import {
StyleSheet,
Text,
View,
Button,
FlatList,
ActivityIndicator,
SafeAreaView,
} from "react-native";
import { getStudents } from "../actions/attendanceActions";
import { useDispatch, useSelector } from "react-redux";
import StudentCard from "../components/StudentCard";
const ListScreen = (props) => {
//const { navigation } = props;
const dispatch = useDispatch();
const { navigation } = props;
const recordId = props.navigation.getParam("id");
const studentsArray = useSelector((state) => state.students);
const { students, loading } = studentsArray;
const loadData = async () => {
dispatch(getStudents(recordId));
};
useEffect(() => {
loadData();
}, [recordId, navigation]);
const renderCard = ({ item }) => {
return (
<StudentCard
id={item._id}
name={item.name}
rollno={item.rollno}
branch={item.branch}
teacher={item.teacher}
/>
);
};
return (
<>
{loading ? (
<ActivityIndicator
size="large"
color="black"
style={{ marginVertical: 90 }}
/>
) : (
<SafeAreaView style={{ flex: 1 }}>
<View style={{ marginTop: 10, marginLeft: 50, marginRight: 50 }}>
<Button
title="ADD ATTENDANCE"
onPress={() => {
props.navigation.navigate({
routeName: "Scanner",
params: {
courseId: recordId,
},
});
}}
color="black"
/>
</View>
<FlatList
data={students.students}
keyExtractor={(item) => item._id}
renderItem={renderCard}
/>
</SafeAreaView>
)}
</>
);
};
ListScreen.navigationOptions = (navigationData) => {
const headerName = navigationData.navigation.getParam("course");
return {
headerTitle: ` Attendance for ${headerName}`,
};
};
export default ListScreen;
const styles = StyleSheet.create({});
<file_sep>import React, { useEffect, useState } from "react";
import {
StyleSheet,
Text,
View,
TextInput,
TouchableOpacity,
Alert,
ActivityIndicator,
} from "react-native";
import { useDispatch, useSelector } from "react-redux";
import { login } from "../actions/teacherActions";
import { showMessage } from "react-native-flash-message";
const LoginScreen = (props) => {
const [email, setEmail] = useState("");
const [password, setPassword] = useState("");
const dispatch = useDispatch();
const info = useSelector((state) => state.teacherLogin);
const { loading, error, success, teacherInfo } = info;
  const submitHandler = () => {
    dispatch(login(email, password));
  };
  // Navigate only after the login request has actually succeeded (checking
  // `loading` synchronously right after dispatch raced the async action and
  // could read an undefined teacherInfo); alert on failure instead.
  useEffect(() => {
    if (success && teacherInfo) {
      props.navigation.navigate({
        routeName: "Menu",
        params: {
          userName: teacherInfo.name,
        },
      });
    } else if (error) {
      Alert.alert(
        "Unregistered Login",
        "The credentials provided don't match any user in the database",
        [
          {
            text: "Ok",
            onPress: () => console.log("Ok pressed"),
          },
          {
            text: "Register",
            onPress: () => props.navigation.navigate("Register"),
          },
        ]
      );
    }
  }, [success, error, teacherInfo]);
return (
<View style={styles.container}>
{loading ? (
<ActivityIndicator size="large" color="black" />
) : (
<>
<Text style={styles.formLabel}> Login </Text>
<View>
<Text styles={styles.label}>Email</Text>
<TextInput
placeholder="Enter Email"
style={styles.inputStyle}
value={email}
onChangeText={setEmail}
/>
<Text styles={styles.label}>Password</Text>
<TextInput
secureTextEntry={true}
placeholder="Enter Password"
style={styles.inputStyle}
value={password}
onChangeText={setPassword}
/>
<TouchableOpacity
onPress={submitHandler}
style={styles.appButtonContainer1}
>
<Text style={styles.appButtonText}>LOGIN</Text>
</TouchableOpacity>
</View>
</>
)}
</View>
);
};
export default LoginScreen;
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: "#fff",
alignItems: "center",
justifyContent: "center",
height: 50,
},
label: {
marginBottom: -30,
},
appButtonContainer1: {
elevation: 8,
marginTop: 20,
backgroundColor: "#009688",
borderRadius: 10,
paddingVertical: 10,
paddingHorizontal: 12,
},
appButtonText: {
fontSize: 18,
color: "#fff",
fontWeight: "bold",
alignSelf: "center",
textTransform: "uppercase",
},
formLabel: {
fontSize: 20,
color: "black",
marginBottom: 40,
marginTop: -200,
},
inputStyle: {
marginTop: 10,
width: 300,
height: 40,
marginBottom: 10,
paddingHorizontal: 10,
borderRadius: 50,
backgroundColor: "#DCDCDC",
},
formText: {
alignItems: "center",
justifyContent: "center",
color: "#fff",
fontSize: 20,
},
text: {
color: "#fff",
fontSize: 20,
},
});
<file_sep>export const CREATE_TEACHER_REQUEST = "CREATE_TEACHER_REQUEST";
export const CREATE_TEACHER_SUCCESS = "CREATE_TEACHER_SUCCESS";
export const CREATE_TEACHER_FAIL = "CREATE_TEACHER_FAIL";
export const USER_LOGIN_SUCCESS = "USER_LOGIN_SUCCESS";
export const USER_LOGIN_REQUEST = "USER_LOGIN_REQUEST";
export const USER_LOGIN_FAIL = "USER_LOGIN_FAIL";
export const CREATE_ATTENDANCE_REQUEST = "CREATE_ATTENDANCE_REQUEST";
export const CREATE_ATTENDANCE_SUCCESS = "CREATE_ATTENDANCE_SUCCESS";
export const CREATE_ATTENDANCE_FAIL = "CREATE_ATTENDANCE_FAIL";
export const GET_ATTENDANCE_REQUEST = "GET_ATTENDANCE_REQUEST";
export const GET_ATTENDANCE_SUCCESS = "GET_ATTENDANCE_SUCCESS";
export const GET_ATTENDANCE_FAIL = "GET_ATTENDANCE_FAIL";
export const GET_TEACHER_RECORDS_REQUEST = "GET_TEACHER_RECORDS_REQUEST";
export const GET_TEACHER_RECORDS_SUCCESS = "GET_TEACHER_RECORDS_SUCCESS";
export const GET_TEACHER_RECORDS_FAIL = "GET_TEACHER_RECORDS_FAIL";
export const CREATE_SINGLE_RECORD = "CREATE_SINGLE_RECORD";
export const RECORDS_RESET = "RECORDS_RESET";
export const GET_ATTENDANCE_LIST_REQUEST = "GET_ATTENDANCE_LIST_REQUEST";
export const GET_ATTENDANCE_LIST_SUCCESS = "GET_ATTENDANCE_LIST_SUCCESS";
export const GET_ATTENDANCE_LIST_FAIL = "GET_ATTENDANCE_LIST_FAIL";
export const ADD_STUDENT_REQUEST = "ADD_STUDENT_REQUEST";
export const ADD_STUDENT_SUCCESS = "ADD_STUDENT_SUCCESS";
export const ADD_STUDENT_FAIL = "ADD_STUDENT_FAIL";
export const LOGOUT = "LOGOUT";
export const ATTENDANCE_DELETE_REQUEST = "ATTENDANCE_DELETE_REQUEST";
export const ATTENDANCE_DELETE_SUCCESS = "ATTENDANCE_DELETE_SUCCESS";
export const ATTENDANCE_DELETE_FAIL = "ATTENDANCE_DELETE_FAIL";
export const UPDATE_TEACHER_REQUEST = "UPDATE_TEACHER_REQUEST";
export const UPDATE_TEACHER_SUCCESS = "UPDATE_TEACHER_SUCCESS";
export const UPDATE_TEACHER_FAIL = "UPDATE_TEACHER_FAIL";
<file_sep>import {
CREATE_ATTENDANCE_FAIL,
CREATE_ATTENDANCE_REQUEST,
CREATE_ATTENDANCE_SUCCESS,
CREATE_SINGLE_RECORD,
GET_ATTENDANCE_SUCCESS,
GET_ATTENDANCE_FAIL,
GET_ATTENDANCE_REQUEST,
GET_ATTENDANCE_LIST_FAIL,
GET_ATTENDANCE_LIST_SUCCESS,
GET_ATTENDANCE_LIST_REQUEST,
RECORDS_RESET,
ADD_STUDENT_FAIL,
ADD_STUDENT_REQUEST,
ADD_STUDENT_SUCCESS,
ATTENDANCE_DELETE_FAIL,
ATTENDANCE_DELETE_REQUEST,
ATTENDANCE_DELETE_SUCCESS,
} from "../constants";
import axios from "axios";
import server from "../config";
export const addAttendance = (course, date, time) => async (
dispatch,
getState
) => {
try {
dispatch({ type: CREATE_ATTENDANCE_REQUEST });
const {
teacherLogin: { teacherInfo },
} = getState();
const config = {
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${teacherInfo.token}`,
},
};
const { data } = await axios.post(
`${server.BACKEND_API}/create`,
{ course, date, time },
config
);
dispatch({
type: CREATE_ATTENDANCE_SUCCESS,
payload: data,
});
} catch (err) {
dispatch({
type: CREATE_ATTENDANCE_FAIL,
payload: err,
});
}
};
export const getRecords = () => async (dispatch, getState) => {
try {
dispatch({ type: GET_ATTENDANCE_REQUEST });
const {
teacherLogin: { teacherInfo },
} = getState();
const config = {
headers: {
Authorization: `Bearer ${teacherInfo.token}`,
},
};
const { data } = await axios.get(`${server.BACKEND_API}/`, config);
dispatch({
type: GET_ATTENDANCE_SUCCESS,
payload: data,
});
} catch (err) {
dispatch({
type: GET_ATTENDANCE_FAIL,
payload: err,
});
}
};
export const getStudents = (recordId) => async (dispatch, getState) => {
try {
dispatch({ type: GET_ATTENDANCE_LIST_REQUEST });
const {
teacherLogin: { teacherInfo },
} = getState();
const config = {
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${teacherInfo.token}`,
},
};
const { data } = await axios.post(
`${server.BACKEND_API}/list`,
{ recordId },
config
);
dispatch({
type: GET_ATTENDANCE_LIST_SUCCESS,
payload: data,
});
} catch (err) {
dispatch({
type: GET_ATTENDANCE_LIST_FAIL,
payload: err,
});
}
};
export const addStudent = (recordId, name, rollno, branch) => async (
dispatch,
getState
) => {
try {
dispatch({ type: ADD_STUDENT_REQUEST });
const {
teacherLogin: { teacherInfo },
} = getState();
const config = {
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${teacherInfo.token}`,
},
};
const { data } = await axios.post(
`${server.BACKEND_API}/add`,
{ recordId, name, rollno, branch },
config
);
dispatch({
type: ADD_STUDENT_SUCCESS,
payload: data,
});
} catch (err) {
dispatch({
type: ADD_STUDENT_FAIL,
payload: err,
});
}
};
export const deleteAttendance = (recordId, navigation) => async (
dispatch,
getState
) => {
try {
dispatch({ type: ATTENDANCE_DELETE_REQUEST });
const {
teacherLogin: { teacherInfo },
} = getState();
const config = {
headers: {
Authorization: `Bearer ${teacherInfo.token}`,
},
};
const { data } = await axios.delete(
`${server.BACKEND_API}/${recordId}`,
config
);
if (data) {
navigation.navigate("Menu");
}
dispatch({
type: ATTENDANCE_DELETE_SUCCESS,
payload: data,
});
} catch (err) {
dispatch({
type: ATTENDANCE_DELETE_FAIL,
payload: err,
});
}
};
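// The "../config" module imported above is not part of this dump; a minimal
// sketch, assuming it only exports the backend base URL used by the axios calls
// (the URL below is hypothetical):
//
//   export default { BACKEND_API: "http://192.168.0.10:5000/api/attendance" };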
<file_sep>import React, { useState, useEffect } from "react";
import {
StyleSheet,
Text,
View,
TextInput,
Button,
Platform,
ActivityIndicator,
} from "react-native";
import DateTimePicker from "@react-native-community/datetimepicker";
import { useSelector, useDispatch } from "react-redux";
import { addAttendance } from "../actions/attendanceActions";
import AsyncStorage from "@react-native-async-storage/async-storage";
const CreateAttendanceScreen = (props) => {
const [date, setDate] = useState(new Date(1598051730000));
const [mode, setMode] = useState("date");
const [show, setShow] = useState(false);
const [course, setCourse] = useState("");
const [time, setTime] = useState(new Date().toLocaleTimeString());
const dispatch = useDispatch();
const record = useSelector((state) => state.attendance);
const { loading } = record;
const onChange = (event, selectedDate) => {
const currentDate = selectedDate || date;
setShow(Platform.OS === "ios");
setDate(currentDate);
};
const showMode = (currentMode) => {
setShow(true);
setMode(currentMode);
};
const showDatepicker = () => {
showMode("date");
};
const showTimepicker = () => {
showMode("time");
};
return (
<View style={styles.container}>
{loading ? (
<ActivityIndicator size="large" color="black" />
) : (
<>
<Text style={styles.formLabel}> ATTENDANCE </Text>
<View>
<Text styles={styles.label}>Course Name</Text>
<TextInput
placeholder="Enter Course Name"
style={styles.inputStyle}
value={course}
onChangeText={setCourse}
/>
<View>
<Text styles={styles.label}>Date</Text>
<Button onPress={showDatepicker} title="Select Date" />
</View>
<View>
<Text styles={styles.label}>Time</Text>
<Button onPress={showTimepicker} title="Select Time" />
</View>
{show && (
<DateTimePicker
testID="dateTimePicker"
value={date}
mode={mode}
is24Hour={true}
display="default"
onChange={onChange}
/>
)}
<View style={{ marginTop: 60 }}>
<Button
onPress={() => {
dispatch(addAttendance(course, date, time));
props.navigation.pop();
}}
title="Save"
/>
</View>
</View>
</>
)}
</View>
);
};
export default CreateAttendanceScreen;
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: "#fff",
alignItems: "center",
justifyContent: "center",
height: 50,
},
label: {
marginBottom: -30,
},
formLabel: {
fontSize: 20,
color: "black",
marginBottom: 40,
marginTop: -200,
},
inputStyle: {
marginTop: 10,
width: 300,
height: 40,
marginBottom: 10,
paddingHorizontal: 10,
borderRadius: 50,
backgroundColor: "#DCDCDC",
},
formText: {
alignItems: "center",
justifyContent: "center",
color: "#fff",
fontSize: 20,
},
text: {
color: "#fff",
fontSize: 20,
},
});
<file_sep>import React, { useState } from "react";
import { StyleSheet, Text, View, Button, TouchableOpacity } from "react-native";
const StartScreen = (props) => {
return (
<View style={styles.screen}>
<Text style={styles.text}>QR Based</Text>
<Text style={styles.text}>Attendance System</Text>
<View style={{ marginTop: 60 }}>
<TouchableOpacity
onPress={() => {
props.navigation.navigate("Login");
}}
style={styles.appButtonContainer}
>
<Text style={styles.appButtonText}>LOGIN</Text>
</TouchableOpacity>
<TouchableOpacity
onPress={() => {
props.navigation.navigate("Register");
}}
style={styles.appButtonContainer1}
>
<Text style={styles.appButtonText}>REGISTER</Text>
</TouchableOpacity>
</View>
</View>
);
};
export default StartScreen;
const styles = StyleSheet.create({
screen: {
alignContent: "center",
padding: 60,
},
text: {
fontSize: 26,
textTransform: "uppercase",
},
appButtonContainer: {
elevation: 8,
backgroundColor: "#009688",
borderRadius: 10,
paddingVertical: 10,
paddingHorizontal: 12,
},
appButtonContainer1: {
elevation: 8,
backgroundColor: "#009688",
borderRadius: 10,
paddingVertical: 10,
paddingHorizontal: 12,
marginTop: 20,
},
appButtonText: {
fontSize: 18,
color: "#fff",
fontWeight: "bold",
alignSelf: "center",
textTransform: "uppercase",
},
});
<file_sep>import React from "react";
import {
StyleSheet,
Text,
View,
TouchableOpacity,
Button,
ActivityIndicator,
} from "react-native";
import { useDispatch, useSelector } from "react-redux";
import { deleteAttendance } from "../actions/attendanceActions";
import { Card } from "react-native-elements";
const CardRender = ({ course, date, time, students, name, navigation, id }) => {
const dispatch = useDispatch();
const info = useSelector((state) => state.deleteAttendance);
const { loading } = info;
return (
<View>
{loading ? (
<ActivityIndicator
style={{ marginTop: 120 }}
size="large"
color="black"
/>
) : (
<>
<TouchableOpacity
onPress={() => {
navigation.navigate({
routeName: "List",
params: {
id,
students,
course,
},
});
}}
>
<Card>
<Card.Title style={{ textTransform: "uppercase" }}>
{course}
</Card.Title>
<Card.Divider />
<Text>TIME - {time}</Text>
<Text>DATE - {date}</Text>
<Text>TEACHER - {name}</Text>
<Card.Divider />
<View
style={{
display: "flex",
flexDirection: "row",
justifyContent: "space-evenly",
}}
>
<Button
title="VIEW"
onPress={() => {
navigation.navigate({
routeName: "List",
params: {
id,
students,
course,
},
});
}}
color="black"
/>
<Button
title="DELETE"
onPress={() => {
dispatch(deleteAttendance(id, navigation));
}}
color="black"
/>
</View>
</Card>
</TouchableOpacity>
</>
)}
</View>
);
};
export default CardRender;
const styles = StyleSheet.create({});
<file_sep>import React, { useEffect, useState } from "react";
import { StyleSheet, Text, View, Button, Alert } from "react-native";
import { BarCodeScanner } from "expo-barcode-scanner";
import { useDispatch, useSelector } from "react-redux";
import { addStudent } from "../actions/attendanceActions";
import AsyncStorage from "@react-native-async-storage/async-storage";
const ScannerScreen = (props) => {
let name, rollno, branch;
const [hasPermission, setHasPermission] = useState(null);
const [scanned, setScanned] = useState(false);
const [set, setSet] = useState({});
const dispatch = useDispatch();
const recordId = props.navigation.getParam("courseId");
useEffect(() => {
(async () => {
const { status } = await BarCodeScanner.requestPermissionsAsync();
setHasPermission(status === "granted");
})();
}, []);
const handleBarCodeScanned = ({ type, data }) => {
setScanned(true);
setSet(type);
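    // The QR payload is expected to be "<name> <rollno> <branch>", separated by single spaces.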
const splitted = data.split(" ");
name = splitted[0];
rollno = splitted[1];
branch = splitted[2];
Alert.alert(
"Barcode Scanned",
`Bar code with type ${type} and data ${data} has been scanned!`,
[
{
text: "Ok",
onPress: () => {
dispatch(addStudent(recordId, name, rollno, branch));
props.navigation.goBack();
},
},
]
);
};
if (hasPermission === null) {
return <Text>Requesting camera permission</Text>;
}
if (hasPermission === false) {
return <Text>No access to camera</Text>;
}
return (
<View
style={{ flex: 1, flexDirection: "column", justifyContent: "flex-end" }}
>
<BarCodeScanner
onBarCodeScanned={scanned ? undefined : handleBarCodeScanned}
style={StyleSheet.absoluteFillObject}
      />
{scanned && (
<Button
title={"Tap to Scan Again"}
onPress={() => {
setScanned(false);
}}
/>
)}
</View>
);
};
export default ScannerScreen;
const styles = StyleSheet.create({});
<file_sep>import mongoose from "mongoose";
import dotenv from "dotenv";
dotenv.config();
const connect = async () => {
mongoose
.connect(process.env.MONGODB, {
useUnifiedTopology: true,
useNewUrlParser: true,
useCreateIndex: true,
useFindAndModify: false,
})
.then(console.log("Database Connected"))
.catch((err) => {
console.log(err);
});
};
export default connect;
<file_sep>import Attendance from "../models/attendance.js";
import asyncHandler from "express-async-handler";
export const createAttendance = asyncHandler(async (req, res) => {
const id = req.teacher._id;
const { course, date, time } = req.body;
const attendance = await Attendance.create({
course,
teacherId: id,
date,
time,
});
if (attendance) {
res.json({
course: attendance.course,
teacherId: id,
date: attendance.date,
time: attendance.time,
students: [],
});
} else {
res.status(400);
throw new Error("Invalid Attendance");
}
});
export const getAttendanceRecords = asyncHandler(async (req, res) => {
try {
const attendances = await Attendance.find({ teacherId: req.teacher._id });
res.json(attendances);
} catch (err) {
res.status(400);
throw new Error(err);
}
});
export const addStudentRecord = asyncHandler(async (req, res) => {
try {
const { recordId, name, rollno, branch } = req.body;
const userRecords = await Attendance.findById(recordId);
const record = {
name,
rollno,
branch,
teacher: req.teacher._id,
};
userRecords.students.push(record);
await userRecords.save();
res.status(200).json({
name: userRecords.course,
student: userRecords.students,
});
} catch (err) {
res.status(400);
throw new Error("Failed to save");
}
});
export const getAttendanceByCourse = asyncHandler(async (req, res) => {
try {
const { recordId } = req.body;
const records = await Attendance.findById(recordId);
if (records) {
res.status(200).json({
students: records.students,
});
}
} catch (err) {
res.status(400);
throw new Error("No Attendance records");
}
});
export const deleteAttendanceRecord = asyncHandler(async (req, res) => {
const attendance = await Attendance.findById(req.params.id);
if (attendance) {
await attendance.remove();
res.json({ message: "Attendance removed" });
} else {
res.status(404);
throw new Error("Attendance not found");
}
});
<file_sep>import React from "react";
import { StyleSheet, Text, View } from "react-native";
import Card from "../components/Card";
import { useDispatch, useSelector } from "react-redux";
const AddStudentScreen = () => {
return (
<View>
<Text>ADD student</Text>
</View>
);
};
export default AddStudentScreen;
const styles = StyleSheet.create({});
<file_sep>import mongoose from "mongoose";
const studentSchema = new mongoose.Schema({
name: {
type: String,
required: true,
},
rollno: {
type: Number,
required: true,
unique: true,
},
branch: {
type: String,
required: true,
},
teacher: {
type: mongoose.Schema.Types.ObjectId,
required: true,
ref: "Teacher",
},
});
const attendanceSchema = new mongoose.Schema(
{
course: {
type: String,
trim: true,
required: true,
},
teacherId: String,
date: String,
time: String,
students: [studentSchema],
},
{
timestamps: true,
}
);
export default mongoose.model("Attendance", attendanceSchema);
<file_sep>import express from "express";
const router = express.Router();
import {
loginChecker,
registerTeacher,
getProfile,
updateTeacherProfile,
} from "../controllers/authController.js";
import { authorization } from "../middlewares/authMiddleware.js";
router.post("/login", loginChecker);
router.get("/profile", authorization, getProfile);
router.post("/register", registerTeacher);
router.put("/update", authorization, updateTeacherProfile);
export default router;
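// The authorization middleware imported above is not included in this dump; a
// minimal sketch, assuming it verifies the Bearer token with jsonwebtoken and
// attaches the matching teacher to the request:
//
//   import jwt from "jsonwebtoken";
//   import Teacher from "../models/teacher.js";
//   export const authorization = async (req, res, next) => {
//     const token = req.headers.authorization?.split(" ")[1];
//     const decoded = jwt.verify(token, process.env.JWT_SECRET);
//     req.teacher = await Teacher.findById(decoded.id).select("-password");
//     next();
//   };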
<file_sep>import Teacher from "../models/teacher.js";
import asyncHandler from "express-async-handler";
import generateJWT from "../generateJWT.js";
//Register a user
export const registerTeacher = asyncHandler(async (req, res) => {
const { name, lastname, email, password } = req.body;
const alreadyExists = await Teacher.findOne({ email });
if (alreadyExists) {
res.status(400);
throw new Error(
"User Already exists , try with different email and password"
);
}
const teacher = await Teacher.create({ name, lastname, email, password });
if (teacher) {
res.status(201).json({
_id: teacher._id,
name: teacher.name,
lastname: teacher.lastname,
email: teacher.email,
token: generateJWT(teacher._id),
});
} else {
res.status(400);
throw new Error("Invalid credentials");
}
});
//Login checker
export const loginChecker = asyncHandler(async (req, res) => {
const { email, password } = req.body;
const teacher = await Teacher.findOne({ email });
if (teacher && (await teacher.matchPassword(password))) {
res.json({
_id: teacher._id,
name: teacher.name,
lastname: teacher.lastname,
email: teacher.email,
token: generateJWT(teacher._id),
});
} else {
res.status(401).json({
message: "invalid credentials",
});
}
});
//Get teacher profile
export const getProfile = asyncHandler(async (req, res) => {
const teacher = await Teacher.findById(req.teacher._id);
if (teacher) {
res.json({
_id: teacher._id,
name: teacher.name,
email: teacher.email,
token: generateJWT(teacher._id),
});
} else {
res.status(404);
throw new Error("User Not Found");
}
});
export const updateTeacherProfile = asyncHandler(async (req, res) => {
const teacher = await Teacher.findById(req.teacher._id);
if (teacher) {
teacher.name = req.body.name || teacher.name;
teacher.lastname = req.body.lastname || teacher.lastname;
teacher.email = req.body.email || teacher.email;
if (req.body.password) {
teacher.password = req.body.password;
}
const updatedteacher = await teacher.save();
res.json({
_id: updatedteacher._id,
name: updatedteacher.name,
lastname: updatedteacher.lastname,
email: updatedteacher.email,
token: generateJWT(updatedteacher._id),
});
} else {
res.status(404);
throw new Error("Teacher not found");
}
});
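// generateJWT (imported above) is not shown in this dump; a minimal sketch,
// assuming the jsonwebtoken package and a JWT_SECRET environment variable:
//
//   import jwt from "jsonwebtoken";
//   const generateJWT = (id) => jwt.sign({ id }, process.env.JWT_SECRET, { expiresIn: "30d" });
//   export default generateJWT;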
<file_sep>import {
CREATE_ATTENDANCE_FAIL,
CREATE_ATTENDANCE_REQUEST,
CREATE_SINGLE_RECORD,
CREATE_ATTENDANCE_SUCCESS,
GET_ATTENDANCE_SUCCESS,
GET_ATTENDANCE_REQUEST,
GET_ATTENDANCE_FAIL,
RECORDS_RESET,
GET_ATTENDANCE_LIST_REQUEST,
GET_ATTENDANCE_LIST_SUCCESS,
GET_ATTENDANCE_LIST_FAIL,
ADD_STUDENT_FAIL,
ADD_STUDENT_REQUEST,
ADD_STUDENT_SUCCESS,
ATTENDANCE_DELETE_FAIL,
ATTENDANCE_DELETE_SUCCESS,
ATTENDANCE_DELETE_REQUEST,
} from "../constants";
export const studentsAttendance = (state = [], action) => {
switch (action.type) {
case CREATE_SINGLE_RECORD:
return [...state, action.payload];
case RECORDS_RESET:
return [];
default:
return state;
}
};
export const createAttendanceRecord = (state = {}, action) => {
switch (action.type) {
case CREATE_ATTENDANCE_REQUEST:
return { loading: true, record: {} };
case CREATE_ATTENDANCE_SUCCESS:
return { loading: false, success: true, record: action.payload };
case CREATE_ATTENDANCE_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
export const getAttendanceReducer = (state = { attendances: [] }, action) => {
switch (action.type) {
case GET_ATTENDANCE_REQUEST:
return { loading: true, attendances: [] };
case GET_ATTENDANCE_SUCCESS:
return { loading: false, success: true, attendances: action.payload };
case GET_ATTENDANCE_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
export const getStudentListReducer = (state = { students: [] }, action) => {
switch (action.type) {
case GET_ATTENDANCE_LIST_REQUEST:
return { loading: true, students: [] };
case GET_ATTENDANCE_LIST_SUCCESS:
return { loading: false, success: true, students: action.payload };
case GET_ATTENDANCE_LIST_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
export const addStudentReducer = (state = {}, action) => {
switch (action.type) {
case ADD_STUDENT_REQUEST:
return { loading: true, student: {} };
case ADD_STUDENT_SUCCESS:
return { loading: false, student: action.payload };
case ADD_STUDENT_FAIL:
return { loading: false, err: action.payload };
default:
return state;
}
};
export const attendanceDeleteReducer = (state = {}, action) => {
switch (action.type) {
case ATTENDANCE_DELETE_REQUEST:
return { loading: true };
case ATTENDANCE_DELETE_SUCCESS:
return { loading: false, success: true };
case ATTENDANCE_DELETE_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
<file_sep>import React, { useEffect } from "react";
import {
StyleSheet,
Text,
View,
FlatList,
ActivityIndicator,
SafeAreaView,
} from "react-native";
import { useDispatch, useSelector } from "react-redux";
import { getRecords } from "../actions/attendanceActions";
import CardRender from "../components/Card";
const ViewAttendanceScreen = (props) => {
const data = useSelector((state) => state.records);
const teacherData = useSelector((state) => state.teacherLogin);
const { teacherInfo } = teacherData;
const { attendances, loading } = data;
const dispatch = useDispatch();
const loadData = () => {
dispatch(getRecords());
};
const renderCard = ({ item }) => {
console.log(item);
return (
<CardRender
id={item._id}
course={item.course}
date={item.date}
time={item.time}
students={item.students}
name={teacherInfo.name}
navigation={props.navigation}
/>
);
};
useEffect(() => {
loadData();
}, []);
return (
<SafeAreaView style={{ flex: 1 }}>
{loading ? (
<ActivityIndicator
style={{ marginTop: 120 }}
size="large"
color="black"
/>
) : (
<View>
<FlatList
data={attendances}
keyExtractor={(item) => item._id}
renderItem={renderCard}
/>
</View>
)}
</SafeAreaView>
);
};
export default ViewAttendanceScreen;
const styles = StyleSheet.create({});
<file_sep>import React from "react";
import {
StyleSheet,
Text,
View,
TouchableOpacity,
ActivityIndicator,
} from "react-native";
import { logout } from "../actions/teacherActions";
import { useDispatch, useSelector } from "react-redux";
const MenuScreen = (props) => {
const info = useSelector((state) => state.teacherLogin);
const { loading } = info;
const dispatch = useDispatch();
const { navigation } = props;
return (
<View style={{ padding: 60, alignContent: "center" }}>
<View style={{ marginTop: 60 }}>
<TouchableOpacity
onPress={() => {
props.navigation.navigate("Create");
}}
style={styles.appButtonContainer}
>
<Text style={styles.appButtonText}>ADD COURSE AND SCHEDULE</Text>
</TouchableOpacity>
<TouchableOpacity
onPress={() => {
props.navigation.navigate("Attendance");
}}
style={styles.appButtonContainer1}
>
<Text style={styles.appButtonText}>ADD STUDENT ATTENDANCE</Text>
</TouchableOpacity>
<TouchableOpacity
onPress={() => {
props.navigation.navigate("Profile");
}}
style={styles.appButtonContainer1}
>
<Text style={styles.appButtonText}>PROFILE</Text>
</TouchableOpacity>
<TouchableOpacity
onPress={() => {
dispatch(logout());
navigation.navigate("Start");
}}
style={styles.appButtonContainer1}
>
<Text style={styles.appButtonText}>LOGOUT</Text>
</TouchableOpacity>
</View>
</View>
);
};
MenuScreen.navigationOptions = (navigationData) => {
const headerName = navigationData.navigation.getParam("userName");
return {
headerTitle: `Welcome ${headerName}`,
};
};
export default MenuScreen;
const styles = StyleSheet.create({
appButtonContainer: {
elevation: 8,
backgroundColor: "#009688",
borderRadius: 10,
paddingVertical: 10,
paddingHorizontal: 12,
},
appButtonContainer1: {
elevation: 8,
backgroundColor: "#009688",
borderRadius: 10,
paddingVertical: 10,
paddingHorizontal: 12,
marginTop: 20,
},
appButtonText: {
fontSize: 18,
color: "#fff",
fontWeight: "bold",
alignSelf: "center",
textTransform: "uppercase",
},
});
<file_sep>import {
CREATE_TEACHER_FAIL,
CREATE_TEACHER_REQUEST,
CREATE_TEACHER_SUCCESS,
LOGOUT,
USER_LOGIN_FAIL,
USER_LOGIN_REQUEST,
USER_LOGIN_SUCCESS,
UPDATE_TEACHER_FAIL,
UPDATE_TEACHER_REQUEST,
UPDATE_TEACHER_SUCCESS,
} from "../constants";
export const loginReducer = (state = {}, action) => {
switch (action.type) {
case USER_LOGIN_REQUEST:
return { loading: true };
case USER_LOGIN_SUCCESS:
return { loading: false, success: true, teacherInfo: action.payload };
case USER_LOGIN_FAIL:
return { loading: false, error: action.payload };
case LOGOUT:
return {};
default:
return state;
}
};
export const registerReducer = (state = {}, action) => {
  switch (action.type) {
case CREATE_TEACHER_REQUEST:
return { loading: true };
case CREATE_TEACHER_SUCCESS:
return { loading: false, teacherInfo: action.payload };
case CREATE_TEACHER_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
export const updateReducer = (state = {}, action) => {
  switch (action.type) {
case UPDATE_TEACHER_REQUEST:
return { loading: true };
case UPDATE_TEACHER_SUCCESS:
return { loading: false, teacherInfo: action.payload };
case UPDATE_TEACHER_FAIL:
return { loading: false, error: action.payload };
default:
return state;
}
};
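// The screens earlier in this repo read these slices via useSelector(state => state.teacherLogin)
// and useSelector(state => state.records). A minimal store wiring consistent with those keys could
// look like the sketch below; the file paths, the use of redux-thunk, and the exact key-to-reducer
// mapping are assumptions, since the store file is not part of this excerpt:
//
//   import { createStore, combineReducers, applyMiddleware } from "redux";
//   import thunk from "redux-thunk";
//   import { loginReducer } from "./reducers/teacherReducers";
//   import { getAttendanceReducer } from "./reducers/attendanceReducers";
//
//   const rootReducer = combineReducers({
//     teacherLogin: loginReducer,    // read by MenuScreen / ViewAttendanceScreen
//     records: getAttendanceReducer, // read by ViewAttendanceScreen ({ attendances, loading })
//   });
//
//   export const store = createStore(rootReducer, applyMiddleware(thunk));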
<file_sep>import express from "express";
const router = express.Router();
import { authorization } from "../middlewares/authMiddleware.js";
import {
createAttendance,
getAttendanceRecords,
addStudentRecord,
getAttendanceByCourse,
deleteAttendanceRecord,
} from "../controllers/attenndanceController.js";
//get attendance records
router.get("/", authorization, getAttendanceRecords);
//get attendance list by course name
//save attendance to database
router.post("/create", authorization, createAttendance);
//Add single student record in database
router.post("/add", authorization, addStudentRecord);
//Get records by course id
router.post("/list", authorization, getAttendanceByCourse);
//delete attendance record
router.delete("/:id", authorization, deleteAttendanceRecord);
export default router;
<file_sep>import express from "express";
const app = express();
import connect from "./db.js";
import authRoutes from "./routes/authRoutes.js";
import attendanceRoutes from "./routes/attendanceRoutes.js";
import cors from "cors";
import bodyParser from "body-parser";
//Database
connect();
//Middlewares
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: true }));
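// Note: "cors" is imported above but never registered as middleware; if the client runs on a
// different origin, it would typically be enabled here, e.g. app.use(cors());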
//Routes
app.use("/api/auth", authRoutes);
app.use("/api", attendanceRoutes);
const PORT = process.env.PORT || 6000;
app.listen(PORT, () => {
console.log(`Server is up and running at ${PORT}`);
});
<file_sep>import mongoose from "mongoose";
import bcrypt from "bcryptjs";
//const { v4: uuidv4 } = require("uuid");
const teacherSchema = new mongoose.Schema(
{
name: {
type: String,
required: true,
maxlength: 32,
trim: true,
},
lastname: {
type: String,
maxlength: 32,
trim: true,
},
email: {
type: String,
trim: true,
required: true,
unique: true,
},
password: {
type: String,
required: true,
},
},
{
timestamps: true,
}
);
teacherSchema.methods.matchPassword = async function (entered_password) {
return await bcrypt.compare(entered_password, this.password);
};
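// Sketch of how matchPassword would typically be used in a login controller (the controller is not
// part of this excerpt, so the variable names here are illustrative assumptions):
//
//   const teacher = await Teacher.findOne({ email });
//   if (teacher && (await teacher.matchPassword(password))) {
//     // credentials are valid: issue a token / return the profile
//   }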
teacherSchema.pre("save", async function (next) {
  if (!this.isModified("password")) {
    // Skip re-hashing when the password was not changed on this save.
    return next();
  }
const salt = await bcrypt.genSalt(10);
this.password = await bcrypt.hash(this.password, salt);
});
export default mongoose.model("Teacher", teacherSchema);
|
a291c2985632298b2b2ffbb7880ccea416392ac6
|
[
"JavaScript"
] | 21 |
JavaScript
|
jarodburgess49/QR-based-Attendance-App
|
0ff000a885ebe4bd0b9dbdcc548b70b2586e8493
|
bc1f28f06b34aef4360e531af5a08d5c2a2e3d99
|
refs/heads/master
|
<file_sep>// Count the number of uppercase letters, lowercase letters and other (non-letter) characters in a string
public class CompareChar
{
public static void main(String[] args)
{
String s = "12345678";
        char[] ca = s.toCharArray(); // convert the string to a char array
        int Total_Len = s.length(); //ca.length; // total length of the string
        int S_Num=0; // number of lowercase letters
        int B_Num=0; // number of uppercase letters
for(int i=0;i<ca.length;i++)
{
Character c = ca[i];
if(c.compareTo('a')>=0&&c.compareTo('z')<=0)
{
S_Num++;
}
if(c.compareTo('A')>=0&&c.compareTo('Z')<=0)
{
B_Num++;
}
}
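        // Note: Character.isLowerCase(c) / Character.isUpperCase(c) would be a clearer alternative
        // to the explicit 'a'..'z' / 'A'..'Z' range checks above (they also cover non-ASCII letters).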
        int Other_Num = Total_Len - S_Num - B_Num; // other characters = total length - lowercase count - uppercase count
        System.out.println("Total: " + Total_Len + ", lowercase: " + S_Num + ", uppercase: " + B_Num + ", other: " + Other_Num);
}
}
<file_sep> public class ArrayTestDrive
{
public static void main(String[] args)
{
int[] a={9,8,7,6,5,4,3,2,1};
Sort_A(a);
}
private static void Sort_A(int[] a ){
for(int l=0;l<a.length;l++){
System.out.print(a[l]+" ");
}
System.out.println();
int temp ;
int num ;
for(int i=0;i<a.length-1;i++)
{
temp = a[i];
num = i;
for(int j=i+1;j<a.length;j++)
{
if(temp>a[j])
{
num = j;
temp=a[j];
}
}
if(num != i){
temp= a[i];
a[i] = a[num];
a[num] = temp;
}
/*for(int j=i+1;j<a.length;j++)
{
if(a[i]>a[j])
{
a[i]=a[j];
a[j]=temp;
temp=a[i];
}
}*/
}
for(int l=0;l<a.length;l++){
System.out.print(a[l]+" ");
}
}
}
<file_sep>public class EnumTestDrive
{
public enum MyColor {red,green,blue};
public static void main(String[] args)
{
MyColor mc = MyColor.green;
switch(mc){
case red:
System.out.println("red");
break;
case green:
System.out.println("green");
break;
case blue:
System.out.println("blue");
break;
default:
System.out.println("***");
}
}
}
<file_sep>import java.util.*;
public class ListTestDrive
{
public static void main(String[] args)
{
List l1 = new LinkedList();
for(int i=0;i<10;i++)
{
l1.add(i,"list_"+i);
}
System.out.println(l1);
System.out.println(l1.get(3));
System.out.println(l1.indexOf("list_8"));
System.out.println(Collections.binarySearch(l1,"list_4"));
Collections.shuffle(l1);
System.out.println(l1);
Collections.sort(l1);
System.out.println(l1);
Collections.swap(l1,5,9);
System.out.println(l1);
Collections.reverse(l1);
System.out.println(l1);
}
}
<file_sep>import java.io.*;
public class ListFileTestDrive
{
public static void main(String[] args)
{
File f = new File("F:/A");
System.out.println("A");
listFile(f,1);
}
public static void listFile(File f,int level)
{
String preStr = "";
for(int i=0;i<level;i++)
{
preStr+=" ";
}
File[] fa = f.listFiles();
for(int i =0;i<fa.length;i++)
{
System.out.println(preStr+fa[i].getName());
if(fa[i].isDirectory())
{
listFile(fa[i],level+1);
}
}
}
}
<file_sep>class ArrayCopyTest
{
public static void main(String[] args)
{
int[][] intArray = {{1,2},{1,2,3},{3,4}};
int[][] copyArray = new int[3][3];
System.arraycopy(intArray,0,copyArray,0,intArray.length);
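        // Note: for a 2D array, arraycopy copies the row references only (a shallow copy),
        // so copyArray and intArray now share the same inner arrays.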
copyArray[2][1] = 8;
for(int i=0;i<copyArray.length;i++)
{
for(int j=0;j<copyArray[i].length;j++)
{
System.out.print(copyArray[i][j]+" ");
}
System.out.println();
}
String[] s = {"a","b","c","d","e"};
String[] c = new String[6];
System.arraycopy(s,0,c,0,5);
for(int i=0;i<c.length;i++)
{
System.out.println(c[i] +" ");
}
int[] a={1,2,3};
int[] b=new int[4];
System.arraycopy(a,0,b,0,a.length);
for(int i=0;i<b.length;i++)
{
System.out.println(b[i] +" ");
}
int[][] z =new int[2][2];
for(int i=0;i<z.length;i++)
{
for(int j=0;j<z[i].length;j++)
{
System.out.print(z[i][j]+" ");
}
}
}
}
<file_sep>import java.util.*;
public class IteratorTestDrive
{
public static void main(String[] args)
{
Collection c = new HashSet();
c.add(new Name("z","xj"));
c.add(new Name("z","xx"));
c.add(new Name("w","rf"));
c.add(new Name("w","j"));
Iterator i = c.iterator();
while(i.hasNext())
{
Name n = (Name)i.next();
if(n.getLastName().length()<2)
{
i.remove();
}
System.out.print(n.getLastName()+" ");
}
System.out.println();
System.out.println(c);
}
}
<file_sep>public class SellTicketsTest
{
public static void main(String[] args)
{
SellTicket s = new SellTicket();
new Thread(s,"Window1").start();
new Thread(s,"Window2").start();
new Thread(s,"Window3").start();
}
}
class SellTicket implements Runnable
{
int pId= 0;
public void sell()
{
pId++;
        System.out.println(Thread.currentThread().getName() + " sold ticket No. " + pId + "!");
}
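    // Note: pId is shared by three threads without synchronization, so increments can be lost or
    // duplicated; in practice sell() would be declared synchronized (or pId otherwise guarded).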
public void run()
{
try
{
while(pId<200)
{
sell();
Thread.sleep(1);
}
}
catch (InterruptedException e)
{
e.printStackTrace();
}
}
}<file_sep>import java.util.*;
public class CollectionTestDrive
{
public static void main(String[] args)
{
List c = new ArrayList();
/*c.add(new String("haha"));
c.add(new Integer(35));
c.add(new Name("zhang","xj"));
System.out.println(c.size());
c.remove(new String("haha"));
c.remove(new Name("zhang","xj"));
System.out.println(c);
*/
c.add(new Name("z","a"));
c.add(new Name("a","b"));
c.add(new Name("a","c"));
c.add(new Name("e","a"));
c.add(new Name("f","b"));
Collections.sort(c);
System.out.println(c);
}
}
class Name implements Comparable
{
private String firstName,lastName;
public Name()
{
}
public Name(String First,String Last)
{
this.firstName = First;
this.lastName = Last;
}
public String getFirstName(){return firstName;}
public String getLastName(){return lastName;}
public String toString(){return firstName + " " +lastName;}
public boolean equals(Object obj)
{
if(obj instanceof Name)
{
Name n = (Name)obj;
return firstName.equals(n.firstName)&&lastName.equals(n.lastName);
}
return super.equals(obj);
}
public int hashCode()
{
return firstName.hashCode();
}
public int compareTo(Object o)
{
int temp = -1;
try
{
if(o instanceof Name)
{
Name n = (Name)o;
temp = firstName.compareTo(n.firstName);
if(temp != 0)
{
return temp;
}
else
{
temp=lastName.compareTo(n.lastName);
}
}
}
catch (Exception e)
{
e.printStackTrace();
}
return temp;
}
}<file_sep>class Foo
{
int x =12;
public static void go(final int x)
{
System.out.println(x);
}
public static void main(String[] args)
{
System.out.println("Hello World!");
}
}
<file_sep>public class TTT {
public static void main(String[] args) {
a aa = new b();
aa.ff();
System.out.println(aa.a);
System.out.println(((b)aa).a);
}
}
class a {
public int a = 1;
public void ff() {
System.out.println(a);
}
}
class b extends a {
public int a = 2;
public void ff() {
System.out.println(super.a + " " + this.a);
super.ff();
}
}
<file_sep>public class SearchTestDrive
{
public static void main(String[] args)
{
int[] a =new int[500];
for(int i = 0;i<a.length;i++)
{
a[i] = 1;
}
Search(a);
}
public static int Count(int[] a)
{
int temp = 0;
for(int i=0;i<a.length;i++)
{ if(a[i]==1)
{
temp +=a[i];
}
}
return temp;
}
private static void Search(int[] a)
{
int len = a.length;
int index = 0;
int num = 0;
while(Count(a) > 1)
{
if(a[index] == 1)
{
num++;
if(num==3)
{
num=0;
a[index]=0;
}
}
index++;
if(index == len)
{
index = 0;
}
}
for(int i=0;i<len;i++)
{
if(a[i]==1)
{
System.out.println(i);
}
}
}
}
<file_sep>import java.io.*;
public class FileTestDrive
{
public static void main(String[] args)
{
String separator = File.separator;
String filename = "myfile.txt";
String directory = "mydir1"+separator+"mydir2";
File f = new File(directory,filename);
if(f.exists())
{
System.out.println("ๆไปถๅ๏ผ"+f.getAbsolutePath());
System.out.println("ๆไปถๅคงๅฐ๏ผ"+f.length());
}else
{
f.getParentFile().mkdirs();
try
{
f.createNewFile();
}
catch (IOException e)
{
e.printStackTrace();
}
}
String f2 ="F:/็ฏ็Java่ฎฒไน/codes/MSB/11";
File nf =new File(f2);
File[] fa = nf.listFiles();
for(int i =0;i<fa.length;i++)
{
System.out.println(fa[i].getName());
}
}
}
|
ae5cbb48ae16cbdbcfc1f3838d0f9d4bef6bacf2
|
[
"Java"
] | 13 |
Java
|
LittleLazyCat/MyProject
|
cbfff027d0f6eaff02513a09175199db50d73dcc
|
0416efdde6383efcf525e8114aa08fea975035d4
|
refs/heads/master
|
<file_sep>public class BoomerSooner{
public static void main (String[] args){
System.out.println("Forget those Cowboys, let's go Sooners!");
System.out.println("Let's count to 1000, Sooner Style");
for (int i=1;i<=1000;i++){
boolean printNumb = true;
if ((i % 3) == 0){
System.out.print("Boomer");
printNumb = false;
}
if (( i % 5) == 0){
System.out.print("Sooner");
printNumb = false;
}
if(printNumb){
System.out.print(i);
}
System.out.println(" ");
}
}
}
|
eb9e6cc2da376824eb068aa5a1da30a8ce4b4b57
|
[
"Java"
] | 1 |
Java
|
RoyJH/CIS2323_Week18_Final_1
|
b50b2061e06de7b3448c9b01c8f6bcb233e9d976
|
b7d7d6b831c4094c106e0fa309da26fc20a628c8
|
refs/heads/master
|
<file_sep>package an.myapplication;
import android.util.Log;
import com.jme3.app.SimpleApplication;
import com.jme3.asset.AssetManager;
import com.jme3.asset.TextureKey;
import com.jme3.bullet.BulletAppState;
import com.jme3.bullet.PhysicsSpace;
import com.jme3.bullet.collision.shapes.CapsuleCollisionShape;
import com.jme3.bullet.collision.shapes.PlaneCollisionShape;
import com.jme3.bullet.control.CharacterControl;
import com.jme3.bullet.control.RigidBodyControl;
import com.jme3.input.KeyInput;
import com.jme3.input.controls.ActionListener;
import com.jme3.input.controls.AnalogListener;
import com.jme3.input.controls.KeyTrigger;
import com.jme3.input.controls.TouchListener;
import com.jme3.input.controls.TouchTrigger;
import com.jme3.input.event.TouchEvent;
import com.jme3.light.AmbientLight;
import com.jme3.material.Material;
import com.jme3.math.ColorRGBA;
import com.jme3.math.Plane;
import com.jme3.math.Vector3f;
import com.jme3.scene.Geometry;
import com.jme3.scene.Node;
import com.jme3.scene.shape.Box;
import com.jme3.shadow.BasicShadowRenderer;
import com.jme3.system.JmeSystem;
import com.jme3.texture.Texture;
import static com.jme3.input.KeyInput.KEY_DOWN;
import static com.jme3.input.event.TouchEvent.Type.MOVE;
import static com.jme3.input.event.TouchEvent.Type.TAP;
public class Main extends SimpleApplication implements ActionListener,TouchListener {
private BulletAppState bulletAppState = new BulletAppState();
BasicShadowRenderer bsr;
Material mat;
Material mat2;
Material mat3;
private boolean left = false, right = false, up = false, down = false;
private Vector3f walkDirection = new Vector3f();
private CharacterControl player;
public void simpleInitApp() {
bulletAppState = new BulletAppState();
stateManager.attach(bulletAppState);
inputManager.addMapping("Touch", new TouchTrigger (0));
inputManager.addListener(this, new String[]{"Touch"});
initMaterial();
createPhysicsTestWorld(rootNode, assetManager, bulletAppState.getPhysicsSpace());
CapsuleCollisionShape capsuleShape = new CapsuleCollisionShape(1.5f, 6f, 1);
player = new CharacterControl(capsuleShape, 0.05f);
player.setJumpSpeed(20);
player.setFallSpeed(30);
player.setGravity(30);
player.setPhysicsLocation(new Vector3f(0, 10, 0));
bulletAppState.getPhysicsSpace().add(player);
}
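    // Note on the values above (jME3 conventions): CapsuleCollisionShape(radius, height, axis),
    // where axis 1 means the Y axis, and CharacterControl(shape, stepHeight), where 0.05f is the
    // maximum step height the character climbs automatically.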
public static void createPhysicsTestWorld(Node rootNode, AssetManager assetManager, PhysicsSpace space) {
AmbientLight light = new AmbientLight();
light.setColor(ColorRGBA.LightGray);
rootNode.addLight(light);
Material material = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
material.setTexture("ColorMap", assetManager.loadTexture("smartmonkey.png"));
Box floorBox = new Box(140, 0.25f, 140);
Geometry floorGeometry = new Geometry("Floor", floorBox);
floorGeometry.setMaterial(material);
floorGeometry.setLocalTranslation(0, -5, 0);
// Plane plane = new Plane();
//
// plane.setOriginNormal(new Vector3f(0, 0.25f, 0), Vector3f.UNIT_Y);
//
// floorGeometry.addControl(new RigidBodyControl(new PlaneCollisionShape (plane), 0));
floorGeometry.addControl(new RigidBodyControl (0));
rootNode.attachChild(floorGeometry);
space.add(floorGeometry);
//movable boxes
for (int i = 0; i < 12; i++) {
Box box = new Box(0.25f, 0.25f, 0.25f);
Geometry boxGeometry = new Geometry("Box", box);
boxGeometry.setMaterial(material);
boxGeometry.setLocalTranslation(i, 5, -3);
//RigidBodyControl automatically uses box collision shapes when attached to single geometry with box mesh
boxGeometry.addControl(new RigidBodyControl(2));
rootNode.attachChild(boxGeometry);
space.add(boxGeometry);
}
}
public void initMaterial() {
mat = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
TextureKey key = new TextureKey("drawable/ic_launcher.png");
key.setGenerateMips(true);
Texture tex = assetManager.loadTexture(key);
tex.setWrap(Texture.WrapMode.Repeat);
mat.setTexture("ColorMap", tex);
mat2 = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
TextureKey key2 = new TextureKey("drawable/ic_launcher.png");
key2.setGenerateMips(true);
Texture tex2 = assetManager.loadTexture(key2);
mat2.setTexture("ColorMap", tex2);
mat3 = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
TextureKey key3 = new TextureKey("drawable/ic_launcher.png");
key3.setGenerateMips(true);
Texture tex3 = assetManager.loadTexture(key3);
tex3.setWrap(Texture.WrapMode.Repeat);
mat3.setTexture("ColorMap", tex3);
}
@Override
public void onAction(String binding, boolean value, float tpf) {
Log.e("",""+binding);
}
@Override
public void onTouch(String binding, TouchEvent evt, float tpf) {
float x;
float y;
float pressure;
switch(evt.getType())
{
case MOVE:
x = evt.getX();
y = evt.getY();
pressure = evt.getPressure();
break;
case TAP:
x = evt.getX();
y = evt.getY();
break;
case LONGPRESSED:
// move forward
up = true;
break;
case UP:
up = false;
break;
case FLING:
break;
default:
break;
}
Log.e("","Event Type " + evt.getType());
evt.setConsumed();
}
@Override
public void simpleUpdate(float tpf) {
Vector3f camDir = cam.getDirection().clone().multLocal(0.6f);
Vector3f camLeft = cam.getLeft().clone().multLocal(0.4f);
walkDirection.set(0, 0, 0);
if (left) { walkDirection.addLocal(camLeft); }
if (right) { walkDirection.addLocal(camLeft.negate()); }
if (up) { walkDirection.addLocal(camDir); }
if (down) { walkDirection.addLocal(camDir.negate()); }
player.setWalkDirection(walkDirection);
cam.setLocation(player.getPhysicsLocation());
}
}
|
17db754505e93a3344f16a3ba47927ff3d5df710
|
[
"Java"
] | 1 |
Java
|
XlinksToDo/fffffffffffffffdf
|
88d97157723ebeb9e8acfa89b66e4ffe9bea69f1
|
bfe9babc4b4887319040f52770914086e629be69
|
refs/heads/master
|
<repo_name>spookyowl/sqlsession<file_sep>/sqlsession/__init__.py
from gevent import monkey
monkey.patch_all()
import re
import sqlalchemy
import sqlalchemy.engine
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import insert, select, update, delete
from sqlalchemy.sql.expression import text as text_statement
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
from psycopg2.extensions import QuotedString as SqlString
import psycopg2.extensions
import gevent.socket
import urllib
try:
    from itertools import imap as map
except ImportError:
pass
try:
text = unicode
except NameError:
text = str
#TODO: Lazy session !!!
#NOTE: Lazy sessions are problematic: they potentially require long-running connections
# with open cursors blocking reloads of tables. Solution: client/server-side timeouts,
# caching, throttling
def get_value(data, keys, default=None):
result = None
for k in keys:
result = data.get(k)
if result is not None:
return result
if result is None:
return default
def create_engine(params):
db_type = get_value(params, ['type', 'db_type'], 'pgsql')
default_port = None
if db_type == 'mysql':
default_port = 3306
elif db_type == 'pgsql':
default_port = 5432
elif db_type == 'mssql':
default_port = 1433
ctx = (get_value(params, ['user']),
get_value(params, ['passwd', '<PASSWORD>', '<PASSWORD>']),
get_value(params, ['host', 'server'], 'localhost'),
get_value(params, ['port'], default_port),
get_value(params, ['database', 'db_name', 'database_name', 'db']))
#TODO: harmonize, use quoting
if db_type == 'pgsql':
make_psycopg_green()
url = 'postgresql+psycopg2://%s:%s@%s:%s/%s' % ctx
elif db_type == 'mysql':
url = 'mysql+mysqldb://%s:%s@%s:%s/%s' % ctx
elif db_type == 'mssql':
url = 'mssql+pyodbc://%s:%s@%s:%s/%s?driver=SQLServer13' % ctx
else:
raise ValueError('db_type must be eighter "mysql"/"pgsql"/"mssql"')
engine = sqlalchemy.create_engine(url, implicit_returning=True)
return engine
def make_psycopg_green():
"""Configure Psycopg to be used with gevent in non-blocking way."""
if not hasattr(psycopg2.extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
psycopg2.extensions.set_wait_callback(gevent_wait_callback)
def gevent_wait_callback(conn, timeout=None,
# access these objects with LOAD_FAST instead of LOAD_GLOBAL lookup
POLL_OK = psycopg2.extensions.POLL_OK,
POLL_READ = psycopg2.extensions.POLL_READ,
POLL_WRITE = psycopg2.extensions.POLL_WRITE,
wait_read = gevent.socket.wait_read,
wait_write = gevent.socket.wait_write):
"""A wait callback useful to allow gevent to work with Psycopg."""
while 1:
state = conn.poll()
if state == POLL_OK:
break
elif state == POLL_READ:
wait_read(conn.fileno(), timeout=timeout)
elif state == POLL_WRITE:
wait_write(conn.fileno(), timeout=timeout)
else:
raise psycopg2.OperationalError("Bad result from poll: %r" % state)
def preprocess_table_data(table, data):
if isinstance(data, dict):
data = [data]
def convert(item):
result = {}
for column in table.columns:
key = column.name
value = item.get(text(key))
if value is not None:
result[key] = value
return result
return list(map(convert, data))
def build_pkey_condition(table, data):
pkeys = table.primary_key.columns
condition = []
for column in pkeys:
condition.append(column == data[column.name])
return and_(*condition)
def build_condition_from_dict(table, dict_condition):
condition = []
for key,value in dict_condition.items():
column = getattr(table.columns, key)
condition.append(column == value)
return and_(*condition)
def build_order_from_list(table, order_list):
def get_column(key, direction):
if direction is not None and direction not in ('desc', 'asc'):
raise ValueError("Order direction must be 'desc' or 'asc'")
if direction == 'desc':
return getattr(table.columns, key).desc()
else:
return getattr(table.columns, key)
def interpret_column(column):
if isinstance(column, tuple):
return get_column(column[1], column[0])
if isinstance(column, str) or isinstance(column, text):
return get_column(column, 'asc')
else:
raise ValueError('Can not interpret order statement. Use list of strings or tuples.')
if isinstance(order_list, list):
return list(map(interpret_column, order_list))
else:
return [interpret_column(order_list)]
class SqlSessionNotFound(Exception):
pass
class SqlSessionTooMany(Exception):
pass
class NoticeCollector(object):
def __init__(self):
self.buf = []
self.callback = None
def append(self, message):
message = message.rstrip()
if self.callback is not None:
self.callback(message)
self.buf.append(message)
if len(self.buf) > 50:
self.buf.pop(0)
def __iter__(self):
return iter(self.buf)
def __getitem__(self, val):
return self.buf.__getitem__(val)
def __setitem__(self, i, val):
return self.buf.__setitem__(i, val)
def __setslice__(self, i, j, x):
return self.buf.__setslice__(i, j, x)
class SqlSession(object):
def __init__(self, param = None, as_role=None):
self.column_names = None
self.transaction = None
self.as_role = as_role
self.database_type = 'pgsql'
self.disposable = False
if isinstance(param, sqlalchemy.engine.Engine):
self.engine = param
self.metadata = sqlalchemy.MetaData(self.engine)
else:
self.database_type = get_value(param, ['type', 'db_type'], 'pgsql')
self.engine = create_engine(param)
self.metadata = sqlalchemy.MetaData(self.engine)
self.disposable = True
def __enter__(self):
self.connection = self.engine.connect()
if self.database_type == 'pgsql':
self.connection.connection.connection.notices = NoticeCollector()
if self.as_role is not None:
self.set_role(self.as_role)
return self
def __exit__(self, type, value, traceback):
if self.transaction is not None:
self.transaction.commit()
self.transaction = None
self.connection.close()
if self.disposable:
self.engine.dispose()
def begin(self):
self.transaction = self.connection.begin()
def end(self):
if self.transaction is not None:
self.transaction.commit()
self.transaction.close()
self.transaction = None
def rollback(self):
if self.transaction is not None:
self.transaction.rollback()
self.transaction.close()
self.transaction = None
def execute(self, statement):
#if isinstance(statement, text):
# statement = text_statement(statement)
if self.transaction is not None:
return self.connection.execute(statement)
else:
result = self.connection.execute(statement)
self.connection.execute('commit;')
return result
def commit(self):
if self.transaction is not None:
self.transaction.commit()
self.transaction = None
else:
self.connection.execute('commit;')
def get_unbound_connection(self):
return self.engine.contextual_connect(close_with_result=True).execution_options(stream_results=True)
def get_table(self, schema_table_name):
t = schema_table_name.split('.')
if len(t) == 1:
table_name = t[0]
return Table(table_name, self.metadata, autoload=True,
autoload_with=self.engine)
elif len(t) == 2:
schema_name, table_name = t
return Table(table_name, self.metadata, autoload=True,
autoload_with=self.engine,
schema=schema_name)
else:
raise ValueError("schema_table_name")
def update(self, table, data, condition=None):
if isinstance(table, str):
table = self.get_table(table)
if condition is None:
condition = build_pkey_condition(table, data)
elif isinstance(condition, dict):
condition = build_condition_from_dict(table, condition)
data = preprocess_table_data(table, data)
stmt = update(table).where(condition).values(data[0])
return self.execute(stmt)
def insert(self, table, data):
if isinstance(table, str):
table = self.get_table(table)
data = preprocess_table_data(table, data)
stmt = insert(table, list(data), returning=table.primary_key.columns)
return self.execute(stmt)
def delete(self, table, condition=None):
if isinstance(table, str):
table = self.get_table(table)
if isinstance(condition, dict):
condition = build_condition_from_dict(table, condition)
stmt = delete(table).where(condition)
return self.execute(stmt)
def truncate(self, table):
        raise RuntimeError('Not yet implemented')
def get_statement(self, table, condition, order):
        if isinstance(table, str) or isinstance(table, text):
table = self.get_table(table)
stmt = table.select()
if isinstance(condition, dict):
condition = build_condition_from_dict(table, condition)
if condition is not None:
stmt = stmt.where(condition)
if order is not None:
stmt = stmt.order_by(*build_order_from_list(table, order))
return stmt
def fetch_one(self, table, condition):
        if isinstance(table, str) or isinstance(table, text):
table = self.get_table(table)
stmt = table.select()
if isinstance(condition, dict):
condition = build_condition_from_dict(table, condition)
if condition is not None:
stmt = stmt.where(condition)
return self.one(stmt)
def fetch_all(self, table, condition=None, order=None):
stmt = self.get_statement(table, condition, order)
return self.all(stmt)
def iter_all(self, table, condition=None, order=None):
stmt = self.get_statement(table, condition, order)
connection = self.get_unbound_connection()
data = connection.execute(stmt)
result = map(dict, data)
return result
def count(self, table, condition):
        if isinstance(table, str) or isinstance(table, text):
table = self.get_table(table)
if isinstance(condition, dict):
condition = build_condition_from_dict(table, condition)
stmt = select([func.count('*')]).where(condition)
data = self.connection.execute(stmt)
data = list(data)[0][0]
return data
def one(self, statement):
data = self.connection.execute(statement)
self.column_names = data.keys()
data = list(map(dict, data))
if len(data) > 1:
raise SqlSessionTooMany("Expected exaclty one record, %s found" % len(data))
elif len(data) == 0:
raise SqlSessionNotFound("Row not found")
return data[0]
def all(self, statement):
data = self.connection.execute(statement)
self.column_names = data.keys()
result = list(map(dict, data))
return result
def drop_table(self, table):
if isinstance(table, str):
table = self.get_table(table)
table.drop()
def exists(self, schema_table_name):
if '.' in schema_table_name:
schema_name, table_name = schema_table_name.split('.')
else:
schema_name = None
table_name = schema_table_name
return self.engine.has_table(table_name, schema_name)
def get_current_timestamp(self):
statement = 'SELECT current_timestamp AS now;'
return self.one(statement)['now']
def get_local_timestamp(self):
statement = 'SELECT localtimestamp AS now;'
return self.one(statement)['now']
def set_log_callback(self, callback):
if self.database_type == 'pgsql':
self.connection.connection.connection.notices.callback = callback
def add_user(self, user_name):
if not re.match('[a-zA-Z0-9_]*', user_name):
raise ValueError('User name can contain only letters and numbers')
self.execute('CREATE USER %s' % user_name)
def add_group(self, group_name):
if not re.match('[a-zA-Z0-9_]*', group_name):
raise ValueError('Group name can contain only letters and numbers')
self.execute('CREATE GROUP %s' % group_name)
def rename_user(self, old_user_name, new_user_name):
if not re.match('[a-zA-Z0-9_]*', old_user_name):
raise ValueError('Old user name can contain only letters and numbers')
if not re.match('[a-zA-Z0-9_]*', new_user_name):
raise ValueError('New user name can contain only letters and numbers')
self.execute('ALTER USER %s RENAME TO %s;' % (old_user_name, new_user_name))
def rename_group(self, old_group_name, new_group_name):
if not re.match('[a-zA-Z0-9_]*', old_group_name):
raise ValueError('Old group name can contain only letters and numbers')
if not re.match('[a-zA-Z0-9_]*', new_group_name):
raise ValueError('New group name can contain only letters and numbers')
self.execute('ALTER GROUP %s RENAME TO %s;' % (old_group_name, new_group_name))
def add_user_to_group(self, user_name, group_name):
if not re.match('[a-zA-Z0-9_]*', user_name):
raise ValueError('User name can contain only letters and numbers')
if not re.match('[a-zA-Z0-9_]*', group_name):
raise ValueError('Group name can contain only letters and numbers')
self.execute('ALTER GROUP %s ADD USER %s' % (group_name, user_name))
def drop_user_from_group(self, user_name, group_name):
if not re.match('[a-zA-Z0-9_]*', user_name):
raise ValueError('User name can contain only letters and numbers')
if not re.match('[a-zA-Z0-9_]*', group_name):
raise ValueError('Group name can contain only letters and numbers')
self.execute('ALTER GROUP %s DROP USER %s' % (group_name, user_name))
def drop_user(self, user_name):
if not re.match('[a-zA-Z0-9_]*', user_name):
raise ValueError('User name can contain only letters and numbers')
self.execute('DROP USER %s' % user_name)
def drop_group(self, group_name):
if not re.match('[a-zA-Z0-9_]*', group_name):
raise ValueError('User name can contain only letters and numbers')
self.execute('DROP GROUP %s' % group_name)
def set_role(self, user_name):
if not re.match('[a-zA-Z0-9]*', user_name):
raise ValueError('User name can contain only letters and numbers')
self.execute('SET role=%s' % user_name)
def grant_role(self, user_name, target_role):
if not re.match('[a-zA-Z][a-zA-Z0-9_]*', user_name):
raise ValueError('User name can contain only letters and numbers')
if not re.match('[a-zA-Z0-9_]*', target_role):
raise ValueError('Target role can contain only letters and numbers')
self.execute('GRANT %s TO %s;' % (user_name, target_role))
def set_user_password(self, user_name, password):
if not re.match('[a-zA-Z0-9]*', user_name):
raise ValueError('User name can contain only letters and numbers')
#TODO:
        escaped_password = SqlString(password)
        escaped_password.encoding = 'utf-8'
        self.execute("ALTER USER %s WITH PASSWORD %s;" % (user_name, escaped_password))
|
04afbf0e5cefa048fdd327c3435de22a936c45d8
|
[
"Python"
] | 1 |
Python
|
spookyowl/sqlsession
|
59d1e50e96b7ab46baadf7fba611b4117821d92b
|
4a5e59bfa94f57e6c8c1acdace6e45327997f4c9
|
refs/heads/master
|
<repo_name>NicSchuler/DSF_NFLDraftPrediction<file_sep>/Project_Scripts/KNN.R
rm(list=ls())
graphics.off()
library(tidyverse)
library(caret) # Classification and Regression Training
library(ggplot2) # Data visualization
# Performance Measurement for training data (2005 to 2013)
KNNPerfMeas = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
KNNPerfMeas[1,2] = "no_sampling"
KNNPerfMeas[2,2] = "oversampling"
KNNPerfMeas[3,2] = "undersampling"
KNNPerfMeas[4,2] = "Rose_both"
KNNPerfMeas[5,2] = "Smote"
KNNPerfMeas$Method = "KNN"
# Performance Measurement for testing data (2014)
KNNPerfMeasTest = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
KNNPerfMeasTest[1,2] = "no_sampling"
KNNPerfMeasTest[2,2] = "oversampling"
KNNPerfMeasTest[3,2] = "undersampling"
KNNPerfMeasTest[4,2] = "Rose_both"
KNNPerfMeasTest[5,2] = "Smote"
KNNPerfMeasTest$Method = "KNN"
###################################################
# NOTICE
###################################################
# We will run the next steps 5 times (e.g. "1. No Sampling" does the same thing as "2. Oversampling"), but each time using different data for training the model.
# In other words, this is the cross-validation of the sampling methods. The reason for repeating the code instead of looping over the sampling methods or wrapping them in a function
# is that the intermediate objects remain directly available in case they are needed for further processing.
# 1. No Sampling ###################################################
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
# I. KNN Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2013_3<- CleanClass2007to2014_3[CleanClass2007to2014_3$Year != 2014,]
CleanClass2007to2013_3$Drafted <- as.factor(CleanClass2007to2013_3$Drafted)
Data2007to2013_tog <- CleanClass2007to2013_3 %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized
# Testing data
CleanClass2014_3<- CleanClass2007to2014_3[CleanClass2007to2014_3$Year == 2014,]
CleanClass2014_3$Drafted <- as.factor(CleanClass2014_3$Drafted)
CleanClass2014_3_tog <- CleanClass2014_3 %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1)) # Explanation of expand.grid() is in the ReadMe
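# expand.grid() here simply builds a one-column data frame of candidate k values (3, 4, ..., 25);
# caret's train() fits the model once per row of this grid and keeps the k with the best resampled accuracy.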
set.seed(6969)
KNN_tog <- train(Drafted~.,
data=Data2007to2013_tog,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions: 0.5 is used for probability cutoff value by default
predict_tog <- predict(KNN_tog,Data2007to2013_tog)
confusionMatrix(predict_tog,Data2007to2013_tog$Drafted)
CheckList_tog = cbind.data.frame(Data2007to2013_tog$Drafted,predict_tog)
names(CheckList_tog)[names(CheckList_tog)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_tog)[names(CheckList_tog)=="predict_tog"] <- "Pred"
CheckList_tog = CheckList_tog %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[1,"Together_TP"] = sum(CheckList_tog$TP)
KNNPerfMeas[1,"Together_TN"] = sum(CheckList_tog$TN)
KNNPerfMeas[1,"Together_FP"] = sum(CheckList_tog$FP)
KNNPerfMeas[1,"Together_FN"] = sum(CheckList_tog$FN)
# For testing data
predict_togTest <- predict(KNN_tog,CleanClass2014_3_tog)
CheckList_togTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togTest)
names(CheckList_togTest)[names(CheckList_togTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togTest)[names(CheckList_togTest)=="predict_togTest"] <- "Pred"
CheckList_togTest = CheckList_togTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[1,"Together_TP"] = sum(CheckList_togTest$TP)
KNNPerfMeasTest[1,"Together_TN"] = sum(CheckList_togTest$TN)
KNNPerfMeasTest[1,"Together_FP"] = sum(CheckList_togTest$FP)
KNNPerfMeasTest[1,"Together_FN"] = sum(CheckList_togTest$FN)
# II. KNN Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QB <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="QB", ]
Data2007to2013_QB <- Data2007to2013_QB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance hence they can not be standardized
# Testing data
CleanClass2014_3_QB<- CleanClass2014_3[CleanClass2014_3$Position=="QB", ]
CleanClass2014_3_QB <- CleanClass2014_3_QB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance hence they can not be standardized
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_QB <- train(Drafted~.,
data=Data2007to2013_QB,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_QB <- predict(KNN_QB, newdata=Data2007to2013_QB)
confusionMatrix(predict_QB, Data2007to2013_QB$Drafted)
CheckList_QB = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QB)
names(CheckList_QB)[names(CheckList_QB)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QB)[names(CheckList_QB)=="predict_QB"] <- "Pred"
CheckList_QB = CheckList_QB %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[1,"QB_TP"] = sum(CheckList_QB$TP)
KNNPerfMeas[1,"QB_TN"] = sum(CheckList_QB$TN)
KNNPerfMeas[1,"QB_FP"] = sum(CheckList_QB$FP)
KNNPerfMeas[1,"QB_FN"] = sum(CheckList_QB$FN)
# For testing data
predict_QBTest <- predict(KNN_QB,CleanClass2014_3_QB)
CheckList_QBTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBTest)
names(CheckList_QBTest)[names(CheckList_QBTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBTest)[names(CheckList_QBTest)=="predict_QBTest"] <- "Pred"
CheckList_QBTest = CheckList_QBTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[1,"QB_TP"] = sum(CheckList_QBTest$TP)
KNNPerfMeasTest[1,"QB_TN"] = sum(CheckList_QBTest$TN)
KNNPerfMeasTest[1,"QB_FP"] = sum(CheckList_QBTest$FP)
KNNPerfMeasTest[1,"QB_FN"] = sum(CheckList_QBTest$FN)
# III. KNN Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WR <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="WR", ]
Data2007to2013_WR <- Data2007to2013_WR %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized
# Testing data
CleanClass2014_3_WR<- CleanClass2014_3[CleanClass2014_3$Position=="WR", ]
CleanClass2014_3_WR <- CleanClass2014_3_WR %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_WR <- train(Drafted~.,
data=Data2007to2013_WR,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_WR <- predict(KNN_WR, newdata=Data2007to2013_WR)
confusionMatrix(predict_WR, Data2007to2013_WR$Drafted)
CheckList_WR = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WR)
names(CheckList_WR)[names(CheckList_WR)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WR)[names(CheckList_WR)=="predict_WR"] <- "Pred"
CheckList_WR = CheckList_WR %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[1,"WR_TP"] = sum(CheckList_WR$TP)
KNNPerfMeas[1,"WR_TN"] = sum(CheckList_WR$TN)
KNNPerfMeas[1,"WR_FP"] = sum(CheckList_WR$FP)
KNNPerfMeas[1,"WR_FN"] = sum(CheckList_WR$FN)
# For testing data
predict_WRTest <- predict(KNN_WR,CleanClass2014_3_WR)
CheckList_WRTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRTest)
names(CheckList_WRTest)[names(CheckList_WRTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRTest)[names(CheckList_WRTest)=="predict_WRTest"] <- "Pred"
CheckList_WRTest = CheckList_WRTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[1,"WR_TP"] = sum(CheckList_WRTest$TP)
KNNPerfMeasTest[1,"WR_TN"] = sum(CheckList_WRTest$TN)
KNNPerfMeasTest[1,"WR_FP"] = sum(CheckList_WRTest$FP)
KNNPerfMeasTest[1,"WR_FN"] = sum(CheckList_WRTest$FN)
# IV. KNN Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RB <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="RB", ]
Data2007to2013_RB <- Data2007to2013_RB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized
# Testing data
CleanClass2014_3_RB <- CleanClass2014_3[CleanClass2014_3$Position=="RB", ]
CleanClass2014_3_RB <- CleanClass2014_3_RB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_RB <- train(Drafted~.,
data=Data2007to2013_RB,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_RB <- predict(KNN_RB, newdata=Data2007to2013_RB)
confusionMatrix(predict_RB, Data2007to2013_RB$Drafted)
CheckList_RB = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RB)
names(CheckList_RB)[names(CheckList_RB)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RB)[names(CheckList_RB)=="predict_RB"] <- "Pred"
CheckList_RB = CheckList_RB %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[1,"RB_TP"] = sum(CheckList_RB$TP)
KNNPerfMeas[1,"RB_TN"] = sum(CheckList_RB$TN)
KNNPerfMeas[1,"RB_FP"] = sum(CheckList_RB$FP)
KNNPerfMeas[1,"RB_FN"] = sum(CheckList_RB$FN)
# For testing data
predict_RBTest <- predict(KNN_RB,CleanClass2014_3_RB)
CheckList_RBTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBTest)
names(CheckList_RBTest)[names(CheckList_RBTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBTest)[names(CheckList_RBTest)=="predict_RBTest"] <- "Pred"
CheckList_RBTest = CheckList_RBTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[1,"RB_TP"] = sum(CheckList_RBTest$TP)
KNNPerfMeasTest[1,"RB_TN"] = sum(CheckList_RBTest$TN)
KNNPerfMeasTest[1,"RB_FP"] = sum(CheckList_RBTest$FP)
KNNPerfMeasTest[1,"RB_FN"] = sum(CheckList_RBTest$FN)
# 2. Oversampling ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
# I. KNN Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_oversampling$Drafted <- as.factor(CleanClass2007to2014_3_oversampling$Drafted)
Data2007to2013_togOS <- CleanClass2007to2014_3_oversampling %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized
# Testing data
CleanClass2014_3_tog
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_togOS <- train(Drafted~.,
data=Data2007to2013_togOS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Predictions: 0.5 is used for probability cutoff value by default
predict_togOS <- predict(KNN_togOS,Data2007to2013_tog)
confusionMatrix(predict_togOS,Data2007to2013_tog$Drafted)
CheckList_togOS = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togOS)
names(CheckList_togOS)[names(CheckList_togOS)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togOS)[names(CheckList_togOS)=="predict_togOS"] <- "Pred"
CheckList_togOS = CheckList_togOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[2,"Together_TP"] = sum(CheckList_togOS$TP)
KNNPerfMeas[2,"Together_TN"] = sum(CheckList_togOS$TN)
KNNPerfMeas[2,"Together_FP"] = sum(CheckList_togOS$FP)
KNNPerfMeas[2,"Together_FN"] = sum(CheckList_togOS$FN)
# For testing data
predict_togOSTest <- predict(KNN_togOS,CleanClass2014_3_tog)
CheckList_togOSTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togOSTest)
names(CheckList_togOSTest)[names(CheckList_togOSTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togOSTest)[names(CheckList_togOSTest)=="predict_togOSTest"] <- "Pred"
CheckList_togOSTest = CheckList_togOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[2,"Together_TP"] = sum(CheckList_togOSTest$TP)
KNNPerfMeasTest[2,"Together_TN"] = sum(CheckList_togOSTest$TN)
KNNPerfMeasTest[2,"Together_FP"] = sum(CheckList_togOSTest$FP)
KNNPerfMeasTest[2,"Together_FN"] = sum(CheckList_togOSTest$FN)
# II. KNN Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBOS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="QB", ]
Data2007to2013_QBOS <- Data2007to2013_QBOS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance hence they can not be standardized
# Testing data
CleanClass2014_3_QB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_QBOS <- train(Drafted~.,
data=Data2007to2013_QBOS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_QBOS <- predict(KNN_QBOS, newdata=Data2007to2013_QB)
confusionMatrix(predict_QBOS, Data2007to2013_QB$Drafted)
CheckList_QBOS = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBOS)
names(CheckList_QBOS)[names(CheckList_QBOS)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBOS)[names(CheckList_QBOS)=="predict_QBOS"] <- "Pred"
CheckList_QBOS = CheckList_QBOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[2,"QB_TP"] = sum(CheckList_QBOS$TP)
KNNPerfMeas[2,"QB_TN"] = sum(CheckList_QBOS$TN)
KNNPerfMeas[2,"QB_FP"] = sum(CheckList_QBOS$FP)
KNNPerfMeas[2,"QB_FN"] = sum(CheckList_QBOS$FN)
# For testing data
predict_QBOSTest <- predict(KNN_QBOS,CleanClass2014_3_QB)
CheckList_QBOSTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBOSTest)
names(CheckList_QBOSTest)[names(CheckList_QBOSTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBOSTest)[names(CheckList_QBOSTest)=="predict_QBOSTest"] <- "Pred"
CheckList_QBOSTest = CheckList_QBOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[2,"QB_TP"] = sum(CheckList_QBOSTest$TP)
KNNPerfMeasTest[2,"QB_TN"] = sum(CheckList_QBOSTest$TN)
KNNPerfMeasTest[2,"QB_FP"] = sum(CheckList_QBOSTest$FP)
KNNPerfMeasTest[2,"QB_FN"] = sum(CheckList_QBOSTest$FN)
# III. KNN Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WROS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="WR", ]
Data2007to2013_WROS <- Data2007to2013_WROS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_WR
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_WROS <- train(Drafted~.,
data=Data2007to2013_WROS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_WROS <- predict(KNN_WROS, newdata=Data2007to2013_WR)
confusionMatrix(predict_WROS, Data2007to2013_WR$Drafted)
CheckList_WROS = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WROS)
names(CheckList_WROS)[names(CheckList_WROS)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WROS)[names(CheckList_WROS)=="predict_WROS"] <- "Pred"
CheckList_WROS = CheckList_WROS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[2,"WR_TP"] = sum(CheckList_WROS$TP)
KNNPerfMeas[2,"WR_TN"] = sum(CheckList_WROS$TN)
KNNPerfMeas[2,"WR_FP"] = sum(CheckList_WROS$FP)
KNNPerfMeas[2,"WR_FN"] = sum(CheckList_WROS$FN)
# For testing data
predict_WROSTest <- predict(KNN_WROS,CleanClass2014_3_WR)
CheckList_WROSTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WROSTest)
names(CheckList_WROSTest)[names(CheckList_WROSTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WROSTest)[names(CheckList_WROSTest)=="predict_WROSTest"] <- "Pred"
CheckList_WROSTest = CheckList_WROSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[2,"WR_TP"] = sum(CheckList_WROSTest$TP)
KNNPerfMeasTest[2,"WR_TN"] = sum(CheckList_WROSTest$TN)
KNNPerfMeasTest[2,"WR_FP"] = sum(CheckList_WROSTest$FP)
KNNPerfMeasTest[2,"WR_FN"] = sum(CheckList_WROSTest$FN)
# IV. KNN Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBOS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="RB", ]
Data2007to2013_RBOS <- Data2007to2013_RBOS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_RB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_RBOS <- train(Drafted~.,
data=Data2007to2013_RBOS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_RBOS <- predict(KNN_RBOS, newdata=Data2007to2013_RB)
confusionMatrix(predict_RBOS, Data2007to2013_RB$Drafted)
CheckList_RBOS = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBOS)
names(CheckList_RBOS)[names(CheckList_RBOS)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBOS)[names(CheckList_RBOS)=="predict_RBOS"] <- "Pred"
CheckList_RBOS = CheckList_RBOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[2,"RB_TP"] = sum(CheckList_RBOS$TP)
KNNPerfMeas[2,"RB_TN"] = sum(CheckList_RBOS$TN)
KNNPerfMeas[2,"RB_FP"] = sum(CheckList_RBOS$FP)
KNNPerfMeas[2,"RB_FN"] = sum(CheckList_RBOS$FN)
# For testing data
predict_RBOSTest <- predict(KNN_RBOS,CleanClass2014_3_RB)
CheckList_RBOSTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBOSTest)
names(CheckList_RBOSTest)[names(CheckList_RBOSTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBOSTest)[names(CheckList_RBOSTest)=="predict_RBOSTest"] <- "Pred"
CheckList_RBOSTest = CheckList_RBOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[2,"RB_TP"] = sum(CheckList_RBOSTest$TP)
KNNPerfMeasTest[2,"RB_TN"] = sum(CheckList_RBOSTest$TN)
KNNPerfMeasTest[2,"RB_FP"] = sum(CheckList_RBOSTest$FP)
KNNPerfMeasTest[2,"RB_FN"] = sum(CheckList_RBOSTest$FN)
# 3. Undersampling ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
# I. KNN Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_undersampling$Drafted <- as.factor(CleanClass2007to2014_3_undersampling$Drafted)
Data2007to2013_togUS <- CleanClass2007to2014_3_undersampling %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized.
# Testing data
CleanClass2014_3_tog
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_togUS <- train(Drafted~.,
data=Data2007to2013_togUS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Predictions: 0.5 is used for probability cutoff value by default
predict_togUS <- predict(KNN_togUS,Data2007to2013_tog)
confusionMatrix(predict_togUS,Data2007to2013_tog$Drafted)
CheckList_togUS = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togUS)
names(CheckList_togUS)[names(CheckList_togUS)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togUS)[names(CheckList_togUS)=="predict_togUS"] <- "Pred"
CheckList_togUS = CheckList_togUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[3,"Together_TP"] = sum(CheckList_togUS$TP)
KNNPerfMeas[3,"Together_TN"] = sum(CheckList_togUS$TN)
KNNPerfMeas[3,"Together_FP"] = sum(CheckList_togUS$FP)
KNNPerfMeas[3,"Together_FN"] = sum(CheckList_togUS$FN)
# For testing data
predict_togUSTest <- predict(KNN_togUS,CleanClass2014_3_tog)
CheckList_togUSTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togUSTest)
names(CheckList_togUSTest)[names(CheckList_togUSTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togUSTest)[names(CheckList_togUSTest)=="predict_togUSTest"] <- "Pred"
CheckList_togUSTest = CheckList_togUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[3,"Together_TP"] = sum(CheckList_togUSTest$TP)
KNNPerfMeasTest[3,"Together_TN"] = sum(CheckList_togUSTest$TN)
KNNPerfMeasTest[3,"Together_FP"] = sum(CheckList_togUSTest$FP)
KNNPerfMeasTest[3,"Together_FN"] = sum(CheckList_togUSTest$FN)
# II. KNN Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="QB", ]
Data2007to2013_QBUS <- Data2007to2013_QBUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD, -Kickoff.Ret, -Kickoff.Ret.Yard, -Punt.Ret, -Punt.Ret.Yard) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_QB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_QBUS <- train(Drafted~.,
data=Data2007to2013_QBUS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_QBUS <- predict(KNN_QBUS, newdata=Data2007to2013_QB)
confusionMatrix(predict_QBUS, Data2007to2013_QB$Drafted)
CheckList_QBUS = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBUS)
names(CheckList_QBUS)[names(CheckList_QBUS)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBUS)[names(CheckList_QBUS)=="predict_QBUS"] <- "Pred"
CheckList_QBUS = CheckList_QBUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[3,"QB_TP"] = sum(CheckList_QBUS$TP)
KNNPerfMeas[3,"QB_TN"] = sum(CheckList_QBUS$TN)
KNNPerfMeas[3,"QB_FP"] = sum(CheckList_QBUS$FP)
KNNPerfMeas[3,"QB_FN"] = sum(CheckList_QBUS$FN)
# For testing data
predict_QBUSTest <- predict(KNN_QBUS,CleanClass2014_3_QB)
CheckList_QBUSTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBUSTest)
names(CheckList_QBUSTest)[names(CheckList_QBUSTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBUSTest)[names(CheckList_QBUSTest)=="predict_QBUSTest"] <- "Pred"
CheckList_QBUSTest = CheckList_QBUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[3,"QB_TP"] = sum(CheckList_QBUSTest$TP)
KNNPerfMeasTest[3,"QB_TN"] = sum(CheckList_QBUSTest$TN)
KNNPerfMeasTest[3,"QB_FP"] = sum(CheckList_QBUSTest$FP)
KNNPerfMeasTest[3,"QB_FN"] = sum(CheckList_QBUSTest$FN)
# III. KNN Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="WR", ]
Data2007to2013_WRUS <- Data2007to2013_WRUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_WR
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_WRUS <- train(Drafted~.,
data=Data2007to2013_WRUS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_WRUS <- predict(KNN_WRUS, newdata=Data2007to2013_WR)
confusionMatrix(predict_WRUS, Data2007to2013_WR$Drafted)
CheckList_WRUS = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRUS)
names(CheckList_WRUS)[names(CheckList_WRUS)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRUS)[names(CheckList_WRUS)=="predict_WRUS"] <- "Pred"
CheckList_WRUS = CheckList_WRUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[3,"WR_TP"] = sum(CheckList_WRUS$TP)
KNNPerfMeas[3,"WR_TN"] = sum(CheckList_WRUS$TN)
KNNPerfMeas[3,"WR_FP"] = sum(CheckList_WRUS$FP)
KNNPerfMeas[3,"WR_FN"] = sum(CheckList_WRUS$FN)
# For testing data
predict_WRUSTest <- predict(KNN_WRUS,CleanClass2014_3_WR)
CheckList_WRUSTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRUSTest)
names(CheckList_WRUSTest)[names(CheckList_WRUSTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRUSTest)[names(CheckList_WRUSTest)=="predict_WRUSTest"] <- "Pred"
CheckList_WRUSTest = CheckList_WRUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[3,"WR_TP"] = sum(CheckList_WRUSTest$TP)
KNNPerfMeasTest[3,"WR_TN"] = sum(CheckList_WRUSTest$TN)
KNNPerfMeasTest[3,"WR_FP"] = sum(CheckList_WRUSTest$FP)
KNNPerfMeasTest[3,"WR_FN"] = sum(CheckList_WRUSTest$FN)
# IV. KNN Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="RB", ]
Data2007to2013_RBUS <- Data2007to2013_RBUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Pass.Conv) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_RB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_RBUS <- train(Drafted~.,
data=Data2007to2013_RBUS,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_RBUS <- predict(KNN_RBUS, newdata=Data2007to2013_RB)
confusionMatrix(predict_RBUS, Data2007to2013_RB$Drafted)
CheckList_RBUS = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBUS)
names(CheckList_RBUS)[names(CheckList_RBUS)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBUS)[names(CheckList_RBUS)=="predict_RBUS"] <- "Pred"
CheckList_RBUS = CheckList_RBUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[3,"RB_TP"] = sum(CheckList_RBUS$TP)
KNNPerfMeas[3,"RB_TN"] = sum(CheckList_RBUS$TN)
KNNPerfMeas[3,"RB_FP"] = sum(CheckList_RBUS$FP)
KNNPerfMeas[3,"RB_FN"] = sum(CheckList_RBUS$FN)
# For testing data
predict_RBUSTest <- predict(KNN_RBUS,CleanClass2014_3_RB)
CheckList_RBUSTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBUSTest)
names(CheckList_RBUSTest)[names(CheckList_RBUSTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBUSTest)[names(CheckList_RBUSTest)=="predict_RBUSTest"] <- "Pred"
CheckList_RBUSTest = CheckList_RBUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[3,"RB_TP"] = sum(CheckList_RBUSTest$TP)
KNNPerfMeasTest[3,"RB_TN"] = sum(CheckList_RBUSTest$TN)
KNNPerfMeasTest[3,"RB_FP"] = sum(CheckList_RBUSTest$FP)
KNNPerfMeasTest[3,"RB_FN"] = sum(CheckList_RBUSTest$FN)
# 4. Rose_both ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_Rose.both.Rdata")
# I. KNN Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_Rose.both$Drafted <- as.factor(CleanClass2007to2014_3_Rose.both$Drafted)
Data2007to2013_togBO <- CleanClass2007to2014_3_Rose.both %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized.
# Testing data
CleanClass2014_3_tog
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_togBO <- train(Drafted~.,
data=Data2007to2013_togBO,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Predictions: 0.5 is used for probability cutoff value by default
predict_togBO <- predict(KNN_togBO,Data2007to2013_tog)
confusionMatrix(predict_togBO,Data2007to2013_tog$Drafted)
CheckList_togBO = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togBO)
names(CheckList_togBO)[names(CheckList_togBO)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togBO)[names(CheckList_togBO)=="predict_togBO"] <- "Pred"
CheckList_togBO = CheckList_togBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[4,"Together_TP"] = sum(CheckList_togBO$TP)
KNNPerfMeas[4,"Together_TN"] = sum(CheckList_togBO$TN)
KNNPerfMeas[4,"Together_FP"] = sum(CheckList_togBO$FP)
KNNPerfMeas[4,"Together_FN"] = sum(CheckList_togBO$FN)
# For testing data
predict_togBOTest <- predict(KNN_togBO,CleanClass2014_3_tog)
CheckList_togBOTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togBOTest)
names(CheckList_togBOTest)[names(CheckList_togBOTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togBOTest)[names(CheckList_togBOTest)=="predict_togBOTest"] <- "Pred"
CheckList_togBOTest = CheckList_togBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[4,"Together_TP"] = sum(CheckList_togBOTest$TP)
KNNPerfMeasTest[4,"Together_TN"] = sum(CheckList_togBOTest$TN)
KNNPerfMeasTest[4,"Together_FP"] = sum(CheckList_togBOTest$FP)
KNNPerfMeasTest[4,"Together_FN"] = sum(CheckList_togBOTest$FN)
# II. KNN Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="QB", ]
Data2007to2013_QBBO <- Data2007to2013_QBBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_QB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_QBBO <- train(Drafted~.,
data=Data2007to2013_QBBO,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_QBBO <- predict(KNN_QBBO, newdata=Data2007to2013_QB)
confusionMatrix(predict_QBBO, Data2007to2013_QB$Drafted)
CheckList_QBBO = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBBO)
names(CheckList_QBBO)[names(CheckList_QBBO)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBBO)[names(CheckList_QBBO)=="predict_QBBO"] <- "Pred"
CheckList_QBBO = CheckList_QBBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[4,"QB_TP"] = sum(CheckList_QBBO$TP)
KNNPerfMeas[4,"QB_TN"] = sum(CheckList_QBBO$TN)
KNNPerfMeas[4,"QB_FP"] = sum(CheckList_QBBO$FP)
KNNPerfMeas[4,"QB_FN"] = sum(CheckList_QBBO$FN)
# For testing data
predict_QBBOTest <- predict(KNN_QBBO,CleanClass2014_3_QB)
CheckList_QBBOTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBBOTest)
names(CheckList_QBBOTest)[names(CheckList_QBBOTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBBOTest)[names(CheckList_QBBOTest)=="predict_QBBOTest"] <- "Pred"
CheckList_QBBOTest = CheckList_QBBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[4,"QB_TP"] = sum(CheckList_QBBOTest$TP)
KNNPerfMeasTest[4,"QB_TN"] = sum(CheckList_QBBOTest$TN)
KNNPerfMeasTest[4,"QB_FP"] = sum(CheckList_QBBOTest$FP)
KNNPerfMeasTest[4,"QB_FN"] = sum(CheckList_QBBOTest$FN)
# III. KNN Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="WR", ]
Data2007to2013_WRBO <- Data2007to2013_WRBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_WR
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_WRBO <- train(Drafted~.,
data=Data2007to2013_WRBO,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_WRBO <- predict(KNN_WRBO, newdata=Data2007to2013_WR)
confusionMatrix(predict_WRBO, Data2007to2013_WR$Drafted)
CheckList_WRBO = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRBO)
names(CheckList_WRBO)[names(CheckList_WRBO)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRBO)[names(CheckList_WRBO)=="predict_WRBO"] <- "Pred"
CheckList_WRBO = CheckList_WRBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[4,"WR_TP"] = sum(CheckList_WRBO$TP)
KNNPerfMeas[4,"WR_TN"] = sum(CheckList_WRBO$TN)
KNNPerfMeas[4,"WR_FP"] = sum(CheckList_WRBO$FP)
KNNPerfMeas[4,"WR_FN"] = sum(CheckList_WRBO$FN)
# For testing data
predict_WRBOTest <- predict(KNN_WRBO,CleanClass2014_3_WR)
CheckList_WRBOTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRBOTest)
names(CheckList_WRBOTest)[names(CheckList_WRBOTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRBOTest)[names(CheckList_WRBOTest)=="predict_WRBOTest"] <- "Pred"
CheckList_WRBOTest = CheckList_WRBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[4,"WR_TP"] = sum(CheckList_WRBOTest$TP)
KNNPerfMeasTest[4,"WR_TN"] = sum(CheckList_WRBOTest$TN)
KNNPerfMeasTest[4,"WR_FP"] = sum(CheckList_WRBOTest$FP)
KNNPerfMeasTest[4,"WR_FN"] = sum(CheckList_WRBOTest$FN)
# IV. KNN Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="RB", ]
Data2007to2013_RBBO <- Data2007to2013_RBBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_RB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_RBBO <- train(Drafted~.,
data=Data2007to2013_RBBO,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_RBBO <- predict(KNN_RBBO, newdata=Data2007to2013_RB)
confusionMatrix(predict_RBBO, Data2007to2013_RB$Drafted)
CheckList_RBBO = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBBO)
names(CheckList_RBBO)[names(CheckList_RBBO)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBBO)[names(CheckList_RBBO)=="predict_RBBO"] <- "Pred"
CheckList_RBBO = CheckList_RBBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[4,"RB_TP"] = sum(CheckList_RBBO$TP)
KNNPerfMeas[4,"RB_TN"] = sum(CheckList_RBBO$TN)
KNNPerfMeas[4,"RB_FP"] = sum(CheckList_RBBO$FP)
KNNPerfMeas[4,"RB_FN"] = sum(CheckList_RBBO$FN)
# For testing data
predict_RBBOTest <- predict(KNN_RBBO,CleanClass2014_3_RB)
CheckList_RBBOTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBBOTest)
names(CheckList_RBBOTest)[names(CheckList_RBBOTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBBOTest)[names(CheckList_RBBOTest)=="predict_RBBOTest"] <- "Pred"
CheckList_RBBOTest = CheckList_RBBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[4,"RB_TP"] = sum(CheckList_RBBOTest$TP)
KNNPerfMeasTest[4,"RB_TN"] = sum(CheckList_RBBOTest$TN)
KNNPerfMeasTest[4,"RB_FP"] = sum(CheckList_RBBOTest$FP)
KNNPerfMeasTest[4,"RB_FN"] = sum(CheckList_RBBOTest$FN)
# 5. Smote ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
# I. KNN Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
cleanData_smote$Drafted <- as.factor(cleanData_smote$Drafted)
Data2007to2013_togSM <- cleanData_smote %>% select(-Position, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance hence it can not be standardized.
# Testing data
CleanClass2014_3_tog
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_togSM <- train(Drafted~.,
data=Data2007to2013_togSM,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Predictions: 0.5 is used for probability cutoff value by default
predict_togSM <- predict(KNN_togSM,Data2007to2013_tog)
confusionMatrix(predict_togSM,Data2007to2013_tog$Drafted)
CheckList_togSM = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togSM)
names(CheckList_togSM)[names(CheckList_togSM)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togSM)[names(CheckList_togSM)=="predict_togSM"] <- "Pred"
CheckList_togSM = CheckList_togSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[5,"Together_TP"] = sum(CheckList_togSM$TP)
KNNPerfMeas[5,"Together_TN"] = sum(CheckList_togSM$TN)
KNNPerfMeas[5,"Together_FP"] = sum(CheckList_togSM$FP)
KNNPerfMeas[5,"Together_FN"] = sum(CheckList_togSM$FN)
# For testing data
predict_togSMTest <- predict(KNN_togSM,CleanClass2014_3_tog)
CheckList_togSMTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togSMTest)
names(CheckList_togSMTest)[names(CheckList_togSMTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togSMTest)[names(CheckList_togSMTest)=="predict_togSMTest"] <- "Pred"
CheckList_togSMTest = CheckList_togSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[5,"Together_TP"] = sum(CheckList_togSMTest$TP)
KNNPerfMeasTest[5,"Together_TN"] = sum(CheckList_togSMTest$TN)
KNNPerfMeasTest[5,"Together_FP"] = sum(CheckList_togSMTest$FP)
KNNPerfMeasTest[5,"Together_FN"] = sum(CheckList_togSMTest$FN)
# II. KNN Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBSM <- cleanData_smote[cleanData_smote$Position=="QB", ]
Data2007to2013_QBSM <- Data2007to2013_QBSM %>% select(-Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_QB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_QBSM <- train(Drafted~.,
data=Data2007to2013_QBSM,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_QBSM <- predict(KNN_QBSM, newdata=Data2007to2013_QB)
confusionMatrix(predict_QBSM, Data2007to2013_QB$Drafted)
CheckList_QBSM = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBSM)
names(CheckList_QBSM)[names(CheckList_QBSM)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBSM)[names(CheckList_QBSM)=="predict_QBSM"] <- "Pred"
CheckList_QBSM = CheckList_QBSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[5,"QB_TP"] = sum(CheckList_QBSM$TP)
KNNPerfMeas[5,"QB_TN"] = sum(CheckList_QBSM$TN)
KNNPerfMeas[5,"QB_FP"] = sum(CheckList_QBSM$FP)
KNNPerfMeas[5,"QB_FN"] = sum(CheckList_QBSM$FN)
# For testing data
predict_QBSMTest <- predict(KNN_QBSM,CleanClass2014_3_QB)
CheckList_QBSMTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBSMTest)
names(CheckList_QBSMTest)[names(CheckList_QBSMTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBSMTest)[names(CheckList_QBSMTest)=="predict_QBSMTest"] <- "Pred"
CheckList_QBSMTest = CheckList_QBSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[5,"QB_TP"] = sum(CheckList_QBSMTest$TP)
KNNPerfMeasTest[5,"QB_TN"] = sum(CheckList_QBSMTest$TN)
KNNPerfMeasTest[5,"QB_FP"] = sum(CheckList_QBSMTest$FP)
KNNPerfMeasTest[5,"QB_FN"] = sum(CheckList_QBSMTest$FN)
# III. KNN Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRSM <- cleanData_smote[cleanData_smote$Position=="WR", ]
Data2007to2013_WRSM <- Data2007to2013_WRSM %>% select(-Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_WR
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_WRSM <- train(Drafted~.,
data=Data2007to2013_WRSM,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_WRSM <- predict(KNN_WRSM, newdata=Data2007to2013_WR)
confusionMatrix(predict_WRSM, Data2007to2013_WR$Drafted)
CheckList_WRSM = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRSM)
names(CheckList_WRSM)[names(CheckList_WRSM)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRSM)[names(CheckList_WRSM)=="predict_WRSM"] <- "Pred"
CheckList_WRSM = CheckList_WRSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[5,"WR_TP"] = sum(CheckList_WRSM$TP)
KNNPerfMeas[5,"WR_TN"] = sum(CheckList_WRSM$TN)
KNNPerfMeas[5,"WR_FP"] = sum(CheckList_WRSM$FP)
KNNPerfMeas[5,"WR_FN"] = sum(CheckList_WRSM$FN)
# For testing data
predict_WRSMTest <- predict(KNN_WRSM,CleanClass2014_3_WR)
CheckList_WRSMTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRSMTest)
names(CheckList_WRSMTest)[names(CheckList_WRSMTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRSMTest)[names(CheckList_WRSMTest)=="predict_WRSMTest"] <- "Pred"
CheckList_WRSMTest = CheckList_WRSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[5,"WR_TP"] = sum(CheckList_WRSMTest$TP)
KNNPerfMeasTest[5,"WR_TN"] = sum(CheckList_WRSMTest$TN)
KNNPerfMeasTest[5,"WR_FP"] = sum(CheckList_WRSMTest$FP)
KNNPerfMeasTest[5,"WR_FN"] = sum(CheckList_WRSMTest$FN)
# IV. KNN Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBSM <- cleanData_smote[cleanData_smote$Position=="RB", ]
Data2007to2013_RBSM <- Data2007to2013_RBSM %>% select(-Position, -Name, -Player.Code,-Year,
-Safety) #these variables have zero variance hence they can not be standardized.
# Testing data
CleanClass2014_3_RB
#2 - KNN ----------
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_RBSM <- train(Drafted~.,
data=Data2007to2013_RBSM,
method="knn",
trControl=tr_control,
preProcess=c("center", "scale"),
tuneGrid=hyper_grid)
# Subtract the mean (="center") from each value and then divide the result by the standard deviation (="scale").
# The standardized result is the z-value.
# Predictions
predict_RBSM <- predict(KNN_RBSM, newdata=Data2007to2013_RB)
confusionMatrix(predict_RBSM, Data2007to2013_RB$Drafted)
CheckList_RBSM = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBSM)
names(CheckList_RBSM)[names(CheckList_RBSM)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBSM)[names(CheckList_RBSM)=="predict_RBSM"] <- "Pred"
CheckList_RBSM = CheckList_RBSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
KNNPerfMeas[5,"RB_TP"] = sum(CheckList_RBSM$TP)
KNNPerfMeas[5,"RB_TN"] = sum(CheckList_RBSM$TN)
KNNPerfMeas[5,"RB_FP"] = sum(CheckList_RBSM$FP)
KNNPerfMeas[5,"RB_FN"] = sum(CheckList_RBSM$FN)
# For testing data
predict_RBSMTest <- predict(KNN_RBSM,CleanClass2014_3_RB)
CheckList_RBSMTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBSMTest)
names(CheckList_RBSMTest)[names(CheckList_RBSMTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBSMTest)[names(CheckList_RBSMTest)=="predict_RBSMTest"] <- "Pred"
CheckList_RBSMTest = CheckList_RBSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
KNNPerfMeasTest[5,"RB_TP"] = sum(CheckList_RBSMTest$TP)
KNNPerfMeasTest[5,"RB_TN"] = sum(CheckList_RBSMTest$TN)
KNNPerfMeasTest[5,"RB_FP"] = sum(CheckList_RBSMTest$FP)
KNNPerfMeasTest[5,"RB_FN"] = sum(CheckList_RBSMTest$FN)
# 6. Save KNNPerfMeas as a new dataset ###################################################
# Training
save(KNNPerfMeas, file="../Data/PerformanceMeasurement/KNNPerfMeas.Rdata")
# Testing
save(KNNPerfMeasTest, file="../Data/PerformanceMeasurement/KNNPerfMeasTest.Rdata")
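# Hedged sketch (not part of the original pipeline): deriving summary metrics from the stored
# confusion counts. perf_from_counts() and KNN_Metrics_TogetherTest are hypothetical names used
# for illustration only; they assume the TP/TN/FP/FN columns filled in above.
perf_from_counts <- function(tp, tn, fp, fn) {
  data.frame(Accuracy  = (tp + tn) / (tp + tn + fp + fn),
             Precision = tp / (tp + fp),
             Recall    = tp / (tp + fn))
}
KNN_Metrics_TogetherTest <- perf_from_counts(KNNPerfMeasTest$Together_TP, KNNPerfMeasTest$Together_TN,
                                             KNNPerfMeasTest$Together_FP, KNNPerfMeasTest$Together_FN)
KNN_Metrics_TogetherTest # one row per sampling method, in the same row order as KNNPerfMeasTest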
# 7. Plots and Table for ReadMe ###################################################
# Note the value of "k" for every model; this is illustrated in the ReadMe.
KNN_K = data.frame(Sampling = character(),Together=integer(), QB=integer(), WR=integer(), RB=integer(),stringsAsFactors = FALSE)
KNN_K[1,1] = "no_sampling"
KNN_K[2,1] = "oversampling"
KNN_K[3,1] = "undersampling"
KNN_K[4,1] = "Rose_both"
KNN_K[5,1] = "Smote"
# Plots of the models and keeping track of the k used ----------
# Used as examples for the discussion in the ReadMe
# no sampling
KNN_K[1,2] = 18 #KNN_tog
KNN_K[1,3] = 18 #KNN_QB
KNN_K[1,4] = 17 #KNN_WR
KNN_K[1,5] = 24 #KNN_RB
ggplot(KNN_tog)+
ggtitle("No Sampling - Together")+
geom_segment(x = 18, y = 0, xend = 18, yend = 0.8665551, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8665551, xend = 18, yend = 0.8665551, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 19.5, y = 0.8665551, label = "k=18", color="blue")
ggplot(KNN_QB)+
ggtitle("No Sampling - QB")+
geom_segment(x = 18, y = 0, xend = 18, yend = 0.8417228, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8417228, xend = 18, yend = 0.8417228, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 19.25, y = 0.8428228, label = "k=18", color="blue")
ggplot(KNN_WR)+
ggtitle("No Sampling - WR")+
geom_segment(x = 17, y = 0, xend = 17, yend = 0.8858957 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8858957 , xend = 17, yend = 0.8858957 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 18, y = 0.8868957 , label = "k=17", color="blue")
ggplot(KNN_RB)+
ggtitle("No Sampling - RB")+
geom_segment(x = 24, y = 0, xend = 24, yend = 0.8614354 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8614354 , xend = 24, yend = 0.8614354 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 25.5, y = 0.8614354 , label = "k=24", color="blue")
# oversampling
KNN_K[2,2] = 3 #KNN_togOS
KNN_K[2,3] = 3 #KNN_QBOS
KNN_K[2,4] = 3 #KNN_WROS
KNN_K[2,5] = 3 #KNN_RBOS
ggplot(KNN_togOS)+
ggtitle("Oversampling - Together")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8903520, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8903520, xend = 3, yend = 0.8903520, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8903520, label = "k=3", color="blue")
ggplot(KNN_QBOS)+
ggtitle("Oversampling - QB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8648862, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8648862, xend = 3, yend = 0.8648862, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8648862, label = "k=3", color="blue")
ggplot(KNN_WROS)+
ggtitle("Oversampling - WR")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.9010035 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.9010035 , xend = 3, yend = 0.9010035 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.9010035 , label = "k=3", color="blue")
ggplot(KNN_RBOS)+
ggtitle("Oversampling - RB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8847267 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8847267 , xend = 3, yend = 0.8847267 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8847267 , label = "k=3", color="blue")
# undersampling
KNN_K[3,2] = 19 #KNN_togUS
KNN_K[3,3] = 16 #KNN_QBUS
KNN_K[3,4] = 19 #KNN_WRUS
KNN_K[3,5] = 11 #KNN_RBUS
ggplot(KNN_togUS)+
ggtitle("Undersampling - Together")+
geom_segment(x = 19, y = 0, xend = 19, yend = 0.7956915, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.7956915, xend = 19, yend = 0.7956915, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 20.5, y = 0.7956915, label = "k=19", color="blue")
ggplot(KNN_QBUS)+
ggtitle("Undersampling - QB")+
geom_segment(x = 16, y = 0, xend = 16, yend = 0.8111888, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8111888, xend = 16, yend = 0.8111888, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 17, y = 0.8131888, label = "k=16", color="blue")
ggplot(KNN_WRUS)+
ggtitle("Undersampling - WR")+
geom_segment(x = 19, y = 0, xend = 19, yend = 0.7848363 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.7848363 , xend = 19, yend = 0.7848363 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 20, y = 0.7878363 , label = "k=19", color="blue")
ggplot(KNN_RBUS)+
ggtitle("Undersampling - RB")+
geom_segment(x = 11, y = 0, xend = 11, yend = 0.8092593 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8092593 , xend = 11, yend = 0.8092593 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 12.5, y = 0.8092593 , label = "k=11", color="blue")
# Rose_both
KNN_K[4,2] = 3 #KNN_togBO
KNN_K[4,3] = 3 #KNN_QBBO
KNN_K[4,4] = 3 #KNN_WRBO
KNN_K[4,5] = 3 #KNN_RBBO
ggplot(KNN_togBO)+
ggtitle("Rose Both - Together")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8574325, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8574325, xend = 3, yend = 0.8574325, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8574325, label = "k=3", color="blue")
ggplot(KNN_QBBO)+
ggtitle("Rose Both - QB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8616103, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8616103, xend = 3, yend = 0.8616103, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8616103, label = "k=3", color="blue")
ggplot(KNN_WRBO)+
ggtitle("Rose Both - WR")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8499425 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8499425 , xend = 3, yend = 0.8499425 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8499425 , label = "k=3", color="blue")
ggplot(KNN_RBBO)+
ggtitle("Rose Both - RB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8112046 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8112046 , xend = 3, yend = 0.8112046 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8112046 , label = "k=3", color="blue")
# Smote
KNN_K[5,2] = 3 #KNN_togSM
KNN_K[5,3] = 3 #KNN_QBSM
KNN_K[5,4] = 3 #KNN_WRSM
KNN_K[5,5] = 3 #KNN_RBSM
ggplot(KNN_togSM)+
ggtitle("Smote - Together")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8795691, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8795691, xend = 3, yend = 0.8795691, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8795691, label = "k=3", color="blue")
ggplot(KNN_QBSM)+
ggtitle("Smote - QB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8527891, linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8527891, xend = 3, yend = 0.8527891, linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8527891, label = "k=3", color="blue")
ggplot(KNN_WRSM)+
ggtitle("Smote - WR")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.9008108 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.9008108 , xend = 3, yend = 0.9008108 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.9008108 , label = "k=3", color="blue")
ggplot(KNN_RBSM)+
ggtitle("Smote - RB")+
geom_segment(x = 3, y = 0, xend = 3, yend = 0.8732877 , linetype="dashed", color = "blue", alpha=0.5)+
geom_segment(x = 0, y = 0.8732877 , xend = 3, yend = 0.8732877 , linetype="dashed", color = "blue", alpha=0.5)+
annotate("text", x = 4.5, y = 0.8732877 , label = "k=3", color="blue")<file_sep>/Data/READMEs/RM_DataSampling.Rmd
---
title: "README for Data Sampling"
author: "Group 2"
date: "02 12 2019"
output:
pdf_document: default
html_document: default
word_document: default
---
**Corresponding R Scripts:** [Data_Sampling_ROSE](https://github.com/NicSchuler/DSF_NFLDraftPrediction/blob/master/Project_Scripts/Data_Sampling_ROSE.R);
[Data_Sampling_Smote](https://github.com/NicSchuler/DSF_NFLDraftPrediction/blob/master/Project_Scripts/Data_Sampling_Smote.R).
# 1. Introduction
In classification problems, a common issue is imbalanced class sizes in the training dataset. Such imbalances can lead to a classifier that seems to perform well when judged solely by accuracy but, on closer inspection, is ineffective at classifying instances of the minority class. Because the underlying dataset in this project is heavily skewed towards non-drafted players (approximately 20:1) and therefore contains mostly zero labels, classification models tend to perform sub-optimally, i.e. they predict the majority class much better than the minority class. The classifier can afford to be "lazy" and only classify a player as drafted when it is absolutely certain, which shows in error rates that are very close to the proportion of non-drafted players in the dataset.
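To make this concrete, the following minimal sketch (hedged; it assumes the cleaned dataset `CleanClass2007to2014_3` with its 0/1 `Drafted` column is loaded) shows that a naive baseline which never predicts a draft already reaches an accuracy close to the share of non-drafted players, despite being useless for the minority class:
```{r eval=FALSE}
# Accuracy of a naive baseline that always predicts "not drafted"
baseline_accuracy <- mean(CleanClass2007to2014_3$Drafted == 0)
baseline_accuracy # roughly the proportion of non-drafted players in the data
```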
# 2. General Approach
To deal with the problem of imbalanced class sizes, a number of methods that rework the underlying dataset are commonly applied, some of which are outlined below. In some cases it may not be viable or necessary to aim for equally sized classes (e.g. when instances of the minority class are inherently easy to classify because they exhibit highly specific feature values); especially in high-dimensional classification problems, however, it is appropriate to address the imbalance.
In this project, the imbalance issue can be dealt with by sampling an approximately equal number of drafted and non-drafted players into the training set, either using one of the methods described in the following paragraphs or by making a pre-made assumption about one of the variables. The latter was applied to the unsampled dataset by disregarding any players that played fewer than ten games (as is done in the basic unsampled [dataset](https://github.com/NicSchuler/DSF_NFLDraftPrediction/blob/master/Data/CleanData/CleanClass2007to2014_3.Rdata)), mainly because these players do not have sufficient game data to be useful for the prediction and are intuitively less likely to be drafted due to a severe lack of experience. To gain an overview of the different sampling methods available, the approaches outlined below have been applied on top of this qualitative method and the resulting sampled datasets have been used as training data.
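As a brief illustration of the effect of this qualitative filter (a hedged sketch, assuming `CleanClass2007to2014_2` from the data cleaning script is loaded), the class proportions before and after dropping players with fewer than ten games can be compared as follows:
```{r eval=FALSE}
# Class proportions before and after the ten-game filter
prop.table(table(CleanClass2007to2014_2$Drafted))
filtered <- dplyr::filter(CleanClass2007to2014_2, Games.Played >= 10)
prop.table(table(filtered$Drafted))
```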
# 3. Random Oversampling for the Minority Class
In order to deal with imbalanced data, random oversampling increases the weight of the minority class, here drafted CFPs (class 1), by randomly replicating the minority class instances. This technique is known to increase the likelihood of overfitting, even though random oversampling does not add any new information.
Since the prevalent class, here class 0, amounts to 2011 observations, to obtain a balanced
sample by random oversampling, the new sample size needs to be set to 4022.
## 3.1 Implementation with ROSE-Package
The package Random Over-Sampling Examples (ROSE) provides functions to deal with binary classification problems in the presence of imbalanced classes. ROSE contains the function `ovun.sample`, which implements traditional remedies for class imbalance, such as oversampling the minority class (see present chapter), undersampling the majority class (see chapter 4), or a combination of over- and undersampling (see chapter 5). To choose the resampling technique, `ovun.sample` takes the argument `method`, which accepts one of the values `over`, `under` and `both`.
For randomly oversampling the minority class we make use of the argument `over`. This option determines oversampling with replacement from the minority class, here class 1, until a specified sample size `N` is reached. Since the prevalent class, here class 0, amounts to 2011 observations, to obtain a balanced sample by oversampling, we need to set the new sample size to 4022. The function `ovun.sample` generates a list from which we extract the new augmented data; the result is a new balanced data set:
```
CleanClass2007to2013_3_oversampling <- ovun.sample(Drafted~., data=CleanClass2007to2013_3, method="over",N=4022)
CleanClass2007to2013_3_oversampling <- as.data.frame(CleanClass2007to2013_3_oversampling$data)
table(CleanClass2007to2013_3_oversampling$Drafted)
0 1
2011 2011
```
Alternatively, we could have designed the oversampling by setting the argument `p` of the function `ovun.sample`, which represents the probability of the positive class in the new augmented sample. In this case, the proportion of positive examples would only be approximately equal to the specified `p`.
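As a hedged sketch of this alternative (the object name `CleanClass2007to2013_3_oversampling_p` is hypothetical and only used for illustration):
```{r eval=FALSE}
CleanClass2007to2013_3_oversampling_p <- ovun.sample(Drafted~., data=CleanClass2007to2013_3,
                                                     method="over", p=0.5, seed=6969)$data
table(CleanClass2007to2013_3_oversampling_p$Drafted) # roughly balanced classes
```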
# 4. Random Undersampling for the Majority Class
Through random undersampling of the majority class, here class 0, a randomly chosen subset of the class with more instances is selected to match the number of samples coming from each class. This method can potentially lead to a loss of information from the left-out samples. On the other hand, if instances of the majority class lie close to one another, this technique can still yield robust results. Since the minority class, here class 1, amounts to 327 observations, to obtain a balanced sample by random undersampling we randomly pick 327 observations out of the 2011 not-drafted cases.
## 4.1 Implementation with ROSE-Package
For randomly undersampling the majority class with the function `ovun.sample` from the ROSE package we make use of the argument `under`. This option determines simple undersampling without replacement of the majority class, here class 0, until the specified sample size `N` is reached. Since the minority class, here class 1, amounts to 327 observations, to obtain a balanced sample by undersampling, we need to set the new sample size to 654. Again, the function `ovun.sample` generates a list from which we extract the new augmented data; the result is a new balanced data set:
```
CleanClass2007to2013_3_undersampling <- ovun.sample(Drafted~.,data=CleanClass2007to2013_3,method="under",N=654)
CleanClass2007to2013_3_undersampling <- as.data.frame(CleanClass2007to2013_3_undersampling$data)
table(CleanClass2007to2013_3_undersampling$Drafted)
0 1
327 327
```
Alternatively, we could have designed the undersampling by setting the argument `p`; see the explanation in chapter 3.1 above.
# 5. Combination of Over- and Undersampling
When applying a combination of over- and undersampling, the minority class (here class 1) is oversampled with replacement and the majority class (here class 0) is undersampled without replacement. In essence, the minority class is oversampled to a size determined as a realization of a binomial random variable with probability `p`, the probability of the positive class in the new augmented sample, and size `N`. The majority class is then undersampled correspondingly so that the total abides by the specified `N`.
## 5.1 Implementation with ROSE-Package
For the combination of over- and undersampling with the function `ovun.sample` from the ROSE package we make use of the argument `both`. In this case, both the arguments `N` and `p` have to be set to establish the amount of over- and undersampling. We set the new sample size `N` to 2338, in line with the initial total size of our unsampled data. The argument `p` is set to 0.5, which is the default value. Again, the function `ovun.sample` generates a list from which we extract the new augmented data; the result is a new balanced data set:
```{r eval=FALSE}
CleanClass2007to2013_3_both <- ovun.sample(Drafted~., data=CleanClass2007to2013_3,
method="both",
p=0.5, # probability of class 1 in new sample; default 0.5.
seed=6969, # specify random seed
N=2338) # specified sample according to initial sample size
CleanClass2007to2013_3_both <- as.data.frame(CleanClass2007to2013_3_both$data)
table(CleanClass2007to2013_3_both$Drafted)
0 1
1150 1188
```
# 6. Synthetic Minority Oversampling Technique (SMOTE)
SMOTE is a variant of oversampling, following the same basic logic as described in paragraph 3. Differing from "regular" oversampling by effectively replicating minority class instances, SMOTE generates additional synthetic instances similar to the existing ones. This is achieved through placing artificial datapoints between the original ones, while considering a certain number of nearest neighbors as an indication for what such synthetic data could look like. Because class imbalance is not combatted by using more instances of the same minority data, this can have the advantage of reducing overfit on possibly unique feature manifestations of datapoints in the existing set, however, strong bias can be induced especially in models based on the euclidian distance and especially if a high number of nearest neighbors is used.
## 6.1 Implementation with SMOTE-Package
To perform synthetic minority oversampling and thereby provide a further method to combat class imbalance, the `smotefamily` package was used. This package contains the `SMOTE()` function, which takes the arguments `K` and `dup_size`. The value passed to `K` determines the number of nearest neighbors considered in the synthesis. In the script, the default value of 5 nearest neighbors was used, which leaves room for future exploration of a more suitable value by comparing bias and variance (especially for KNN) when using the sampled data. The value passed to `dup_size` determines the factor by which the original minority class is to be scaled. In the script, the ratio of majority to minority instances was used for this argument to achieve roughly equally sized classes. The SMOTE sampling process was performed for every position and every year to account for possible differences within the original data.
The function `SMOTE()` generates a list from which we extract the synthesized data; the result is a new balanced data set:
```{r eval=FALSE}
D_smote <- SMOTE(X = select(cleanData_var, -y), target = cleanData_var$y,
K = 5,
dup_size = length(which(cleanData_var$y == 0))/length(which(cleanData_var$y == 1)))
```
<file_sep>/Project_Scripts/DataCleaning2.R
library(tidyverse)
library(dbplyr)
library(dplyr)
# Source the function that cleans the data for the different draft years;
# the function is coded in "functionGetCleanClass2.R"
source("functionGetCleanClass2.R")
# 2014 Draft---------------------------------
# Load the different raw data sets
GameSummary2013_0 <- read.csv("../Data/cfbstats-com-2013-1-5-20/player-game-statistics.csv")
PlayerList2013 <- read.csv("../Data/cfbstats-com-2013-1-5-20/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2012_0 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player-game-statistics.csv")
PlayerList2012 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player.csv")
# Compute the clean data set with the function
Class2014_2 = getCleanClass2(2014, Drafts, GameSummary2013_0, PlayerList2013, GameSummary2012_0, PlayerList2012)
# Save the Dataset separately
save(Class2014_2, file="../Data/CleanData/CleanClass2014_2.Rdata")
# 2013 Draft---------------------------------
# Load the different raw data sets
GameSummary2012_0 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player-game-statistics.csv")
PlayerList2012 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2011_0 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player-game-statistics.csv")
PlayerList2011 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player.csv")
# Compute the clean data set with the function
Class2013_2 = getCleanClass2(2013, Drafts, GameSummary2012_0, PlayerList2012, GameSummary2011_0, PlayerList2011)
# Save the Dataset separately
save(Class2013_2, file="../Data/CleanData/CleanClass2013_2.Rdata")
# 2012 Draft---------------------------------
# Load the different raw data sets
GameSummary2011_0 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player-game-statistics.csv")
PlayerList2011 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2010_0 <- read.csv("../Data/cfbstats-com-2010-1-5-0/player-game-statistics.csv")
PlayerList2010 <- read.csv("../Data/cfbstats-com-2010-1-5-0/player.csv")
# Compute the clean data set with the function
Class2012_2 = getCleanClass2(2012, Drafts, GameSummary2011_0, PlayerList2011, GameSummary2010_0, PlayerList2010)
# Save the Dataset separately
save(Class2012_2, file="../Data/CleanData/CleanClass2012_2.Rdata")
# 2011 Draft---------------------------------
# Load the different raw data sets
GameSummary2010_0 <- read.csv("../Data/cfbstats-com-2010-1-5-0/player-game-statistics.csv")
PlayerList2010 <- read.csv("../Data/cfbstats-com-2010-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2009_0 <- read.csv("../Data/cfbstats-com-2009-1-5-0/player-game-statistics.csv")
PlayerList2009 <- read.csv("../Data/cfbstats-com-2009-1-5-0/player.csv")
# Compute the clean data set with the function
Class2011_2 = getCleanClass2(2011, Drafts, GameSummary2010_0, PlayerList2010, GameSummary2009_0, PlayerList2009)
# Save the Dataset separately
save(Class2011_2, file="../Data/CleanData/CleanClass2011_2.Rdata")
# 2010 Draft---------------------------------
# Load the different raw data sets
GameSummary2009_0 <- read.csv("../Data/cfbstats-com-2009-1-5-0/player-game-statistics.csv")
PlayerList2009 <- read.csv("../Data/cfbstats-com-2009-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2008_0 <- read.csv("../Data/cfbstats-com-2008-1-5-0/player-game-statistics.csv")
PlayerList2008 <- read.csv("../Data/cfbstats-com-2008-1-5-0/player.csv")
# Compute the clean data set with the function
Class2010_2 = getCleanClass2(2010, Drafts, GameSummary2009_0, PlayerList2009, GameSummary2008_0, PlayerList2008)
# Save the Dataset separately
save(Class2010_2, file="../Data/CleanData/CleanClass2010_2.Rdata")
# 2009 Draft---------------------------------
# Load the different raw data sets
GameSummary2008_0 <- read.csv("../Data/cfbstats-com-2008-1-5-0/player-game-statistics.csv")
PlayerList2008 <- read.csv("../Data/cfbstats-com-2008-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2007_0 <- read.csv("../Data/cfbstats-com-2007-1-5-0/player-game-statistics.csv")
PlayerList2007 <- read.csv("../Data/cfbstats-com-2007-1-5-0/player.csv")
# Compute the clean data set with the function
Class2009_2 = getCleanClass2(2009, Drafts, GameSummary2008_0, PlayerList2008, GameSummary2007_0, PlayerList2007)
# Save the Dataset separately
save(Class2009_2, file="../Data/CleanData/CleanClass2009_2.Rdata")
# 2008 Draft---------------------------------
# Load the different raw data sets
GameSummary2007_0 <- read.csv("../Data/cfbstats-com-2007-1-5-0/player-game-statistics.csv")
PlayerList2007 <- read.csv("../Data/cfbstats-com-2007-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2006_0 <- read.csv("../Data/cfbstats-com-2006-1-5-0/player-game-statistics.csv")
PlayerList2006 <- read.csv("../Data/cfbstats-com-2006-1-5-0/player.csv")
# Compute the clean data set with the function
Class2008_2 = getCleanClass2(2008, Drafts, GameSummary2007_0, PlayerList2007, GameSummary2006_0, PlayerList2006)
# Save the Dataset separately
save(Class2008_2, file="../Data/CleanData/CleanClass2008_2.Rdata")
# 2007 Draft---------------------------------
# Load the different raw data sets
GameSummary2006_0 <- read.csv("../Data/cfbstats-com-2006-1-5-0/player-game-statistics.csv")
PlayerList2006 <- read.csv("../Data/cfbstats-com-2006-1-5-0/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2005_0 <- read.csv("../Data/cfbstats-com-2005-1-5-0/player-game-statistics.csv")
PlayerList2005 <- read.csv("../Data/cfbstats-com-2005-1-5-0/player.csv")
# Compute the clean data set with the function
Class2007_2 = getCleanClass2(2007, Drafts, GameSummary2006_0, PlayerList2006, GameSummary2005_0, PlayerList2005)
# Save the Dataset separately
save(Class2007_2, file="../Data/CleanData/CleanClass2007_2.Rdata")
# Putting the pieces together---------------------------------
load("../Data/CleanData/CleanClass2014_2.Rdata")
load("../Data/CleanData/CleanClass2013_2.Rdata")
load("../Data/CleanData/CleanClass2012_2.Rdata")
load("../Data/CleanData/CleanClass2011_2.Rdata")
load("../Data/CleanData/CleanClass2010_2.Rdata")
load("../Data/CleanData/CleanClass2009_2.Rdata")
load("../Data/CleanData/CleanClass2008_2.Rdata")
load("../Data/CleanData/CleanClass2007_2.Rdata")
CleanClass07to14_0_2 = rbind(Class2014_2,Class2013_2,Class2012_2,Class2011_2,Class2010_2,Class2009_2,Class2008_2,Class2007_2)
# Remove duplicates (this is still necessary, because some players are only picked in the draft in the
# second year that they are eligible). Thanks to the descending order of the years, the irrelevant older
# years are removed.
CleanClass2007to2014_2 = CleanClass07to14_0_2[!(duplicated(CleanClass07to14_0_2$Player.Code)),]
# Update the levels of the categorical variables, since the old levels like "FR" and "SO" that we
# filtered out are still stored
CleanClass2007to2014_2$Class = factor(CleanClass2007to2014_2$Class)
CleanClass2007to2014_2$Position = factor(CleanClass2007to2014_2$Position)
save(CleanClass2007to2014_2, file="../Data/CleanData/CleanClass2007to2014_2.Rdata")
# Looking at the data-------------
# Let's have a look at the data by plotting histograms of Games.Played over all players and over the drafted ones only
hist(CleanClass2007to2014_2$Games.Played)
hist(CleanClass2007to2014_2$Games.Played[CleanClass2007to2014_2$Drafted==1])
# We can already see that there is a huge number of players with fewer than 10 games played
# who, a priori, don't have a big chance of being drafted
# Now we look at the same problem but in a quantitative way
# Percentage of Drafted Players in the whole dataset
sum(CleanClass2007to2014_2$Drafted)/length(CleanClass2007to2014_2$Drafted)
# Amount of Drafted Players with less than 10 games
sum(CleanClass2007to2014_2$Drafted[CleanClass2007to2014_2$Games.Played<10])
# And the Undrafted players
# Amount of Undrafted Players with less than 10 games
(length(CleanClass2007to2014_2$Drafted[CleanClass2007to2014_2$Games.Played<10])-sum(CleanClass2007to2014_2$Drafted[CleanClass2007to2014_2$Games.Played<10]))
# Percentage of Drafted Players if we cut away all players with less than 10 games
sum(CleanClass2007to2014_2$Drafted[CleanClass2007to2014_2$Games.Played>=10])/length(CleanClass2007to2014_2$Drafted[CleanClass2007to2014_2$Games.Played>=10])
# By filtering out all players with fewer than 10 games, we can increase the percentage of drafted
# players from 6.2% up to 12.4%, which is better for machine learning (according to various sources)
# Optimizing the data
CleanClass2007to2014_3 = CleanClass2007to2014_2 %>%
filter(Games.Played >= 10)
save(CleanClass2007to2014_3, file="../Data/CleanData/CleanClass2007to2014_3.Rdata")
<file_sep>/Project_Scripts/functionGetCleanClass2.R
library(tidyverse)
library(dbplyr)
library(dplyr)
# Data for the NFL-Draft in 2013
# Since the NFL-Draft always takes place in spring and CFB in fall, we have to match the
# season data of the previous year. In other words: A player which is drafted in 2013 has
# played his last CFB season in 2012.
GameSummary2012_0 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player-game-statistics.csv")
PlayerList2012 <- read.csv("../Data/cfbstats-com-2012-1-5-4/player.csv")
Drafts <- read.csv("../Data/DraftedQBRBWR05_19.txt")
GameSummary2011_0 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player-game-statistics.csv")
PlayerList2011 <- read.csv("../Data/cfbstats-com-2011-1-5-0/player.csv")
# Create a function that directly returns the cleaned data for a draft class (example years are kept in the
# argument names, so that it is clear where to put the data of which year (minus one or minus two))
getCleanClass2 <- function(draftyear, Drafts, GameSummary2012_0, PlayerList2012, GameSummary2011_0, PlayerList2011){
# Remove the columns that are only relevant for defensive players, kickers and punters
# (since we only analyze Quarterbacks, Runningbacks and Wide Receivers), as well as Points
# (since they are just a linear combination of 6*Touchdowns + 2*2PtConversion) for
# both years.
GameSummary2012_1 <- GameSummary2012_0 %>%
select(-c(Game.Code,Tackle.Solo, Tackle.Assist, Tackle.For.Loss, Tackle.For.Loss.Yard,
Kick.Punt.Blocked, Pass.Broken.Up, Fumble.Forced, QB.Hurry, Sack, Sack.Yard,
Kickoff, Kickoff.Onside, Kickoff.Out.Of.Bounds, Kickoff.Touchback, Kickoff.Yard,
Punt, Punt.Yard, Points, Def.2XP.Att, Def.2XP.Made, Off.XP.Kick.Made, Off.XP.Kick.Att,
Field.Goal.Att, Field.Goal.Made, Misc.Ret, Misc.Ret.Yard, Misc.Ret.TD, Int.Ret, Int.Ret.Yard,
Int.Ret.TD, Fum.Ret, Fum.Ret.Yard, Fum.Ret.TD))
GameSummary2011_1 <- GameSummary2011_0 %>%
select(-c(Game.Code,Tackle.Solo, Tackle.Assist, Tackle.For.Loss, Tackle.For.Loss.Yard,
Kick.Punt.Blocked, Pass.Broken.Up, Fumble.Forced, QB.Hurry, Sack, Sack.Yard,
Kickoff, Kickoff.Onside, Kickoff.Out.Of.Bounds, Kickoff.Touchback, Kickoff.Yard,
Punt, Punt.Yard, Points, Def.2XP.Att, Def.2XP.Made, Off.XP.Kick.Made, Off.XP.Kick.Att,
Field.Goal.Att, Field.Goal.Made, Misc.Ret, Misc.Ret.Yard, Misc.Ret.TD, Int.Ret, Int.Ret.Yard,
Int.Ret.TD, Fum.Ret, Fum.Ret.Yard, Fum.Ret.TD))
# Remove all the rows that just contain 0's and add a col for the number of games played
GameSummary2012_1 = GameSummary2012_1[apply(GameSummary2012_1[,2:ncol(GameSummary2012_1)], 1, function(x) {!all(x==0)}),]
GameSummary2011_1 = GameSummary2011_1[apply(GameSummary2011_1[,2:ncol(GameSummary2011_1)], 1, function(x) {!all(x==0)}),]
# Group by players and find out how many games they played in that season
GameNmb1 <- GameSummary2012_1 %>%
select(., Player.Code) %>%
mutate(Games.Played = 1) %>%
group_by(., Player.Code) %>%
summarise_all(., sum)
GameNmb2 <- GameSummary2011_1 %>%
select(., Player.Code) %>%
mutate(Games.Played = 1) %>%
group_by(., Player.Code) %>%
summarise_all(., sum)
# Group by players and summarize by the sum, which gives us the performance of the players
# in the games in which they played in that specific year, matched by the player code
GameSummary2012_2 <- GameSummary2012_1 %>%
group_by(., Player.Code) %>%
summarise_all(., sum)
GameSummary2012_Grp = merge(x = GameSummary2012_2, y = GameNmb1, by = "Player.Code", all.x = TRUE)
GameSummary2011_2 <- GameSummary2011_1 %>%
group_by(., Player.Code) %>%
summarise_all(., sum)
GameSummary2011_Grp = merge(x = GameSummary2011_2, y = GameNmb2, by = "Player.Code", all.x = TRUE)
# Dismiss uninteresting columns about the players (unfortunately Height and Weight have too many NA-Values)
PlayerList2012_1 = PlayerList2012 %>%
select(-c(Uniform.Number, Home.Town, Home.State, Home.Country, Last.School, Height, Weight))
PlayerList2011_1 = PlayerList2011 %>%
select(-c(Uniform.Number, Home.Town, Home.State, Home.Country, Last.School, Height, Weight))
# Match some information about the players, dismiss the rows with name "Team Team" and filter
# Class in order to only keep players in their junior (3rd) or senior (4th) year of college,
# since other players are not yet eligible for the Draft. We don't filter Class in the minus-2 year,
# because back then a player was simply one class earlier; filtering there would only risk more mistakes.
Data2012 <- as.data.frame(merge(y = GameSummary2012_Grp, x = PlayerList2012_1, by = "Player.Code", all.y = TRUE))
Data2012_1 = Data2012 %>%
filter(., First.Name != "Team") %>%
filter(., Position %in% c("QB", "RB", "WR")) %>%
mutate(Name= paste(First.Name, Last.Name, sep=" ")) %>%
filter(., Class %in% c("SR", "JR"))
Data2011 <- as.data.frame(merge(y = GameSummary2011_Grp, x = PlayerList2011_1, by = "Player.Code", all.y = TRUE))
Data2011_1 = Data2011 %>%
filter(., First.Name != "Team") %>%
filter(., Position %in% c("QB", "RB", "WR")) %>%
mutate(Name= paste(First.Name, Last.Name, sep=" "))
# Prepare the Draft-Data for the match with the combine data, the "Drafted" column will be the Y!
Drafted2013 = Drafts %>%
mutate(Name = substr(as.character(Player), start = 1, stop = nchar(as.character(Player))-9)) %>%
filter(Drafts$Year==draftyear) %>%
mutate(Drafted = 1) %>%
select(c("Name", "Drafted"))
# Match the Draft and -1 Year data (no all.x=TRUE because a drafted player that cannot
# be matched to any season data is worthless for the prediction) and change NA values in
# Drafted into 0's
Draft1Season2013 <- merge(x = Drafted2013, y = Data2012_1, by = "Name", all.y = TRUE)
for(i in 1:(nrow(Draft1Season2013))){
Draft1Season2013$Drafted[i] = ifelse(is.na(Draft1Season2013$Drafted[i]), 0, 1)
}
# Match the Draft, Combine, -1 Year and -2 Year data
Class2013_0 <- merge(x = Draft1Season2013, y = Data2011_1, by = "Player.Code", all.x = TRUE)
# Fill general player information with -2 Year data if missing in -1
Class2013_1 = Class2013_0
for(i in 1:nrow(Class2013_1)){
Class2013_1$Player.Code.x[i] = ifelse(is.na(Class2013_1$Player.Code.x[i]), Class2013_1$Player.Code.y[i], Class2013_1$Player.Code.x[i])
Class2013_1$Team.Code.x[i] = ifelse(is.na(Class2013_1$Team.Code.x[i]), Class2013_1$Team.Code.y[i], Class2013_1$Team.Code.x[i])
Class2013_1$Last.Name.x[i] = (if(is.na(Class2013_1$Last.Name.x[i])){Class2013_1$Last.Name.y[i]} else{Class2013_1$Last.Name.x[i]})
Class2013_1$First.Name.x[i] = (if(is.na(Class2013_1$First.Name.x[i])){Class2013_1$First.Name.y[i]} else{Class2013_1$First.Name.x[i]})
Class2013_1$Class.x[i] = (if(is.na(Class2013_1$Class.x[i])){Class2013_1$Class.y[i]} else{Class2013_1$Class.x[i]})
Class2013_1$Position.x[i] = (if(is.na(Class2013_1$Position.x[i])){Class2013_1$Position.y[i]} else{Class2013_1$Position.x[i]})
Class2013_1$Drafted[i] = ifelse(is.na(Class2013_1$Drafted[i]), 0, Class2013_1$Drafted[i])
}
# remove personal information that now is available twice
Class2013clean = Class2013_1 %>%
mutate(Year = draftyear) %>%
select(-c(Team.Code.x, Last.Name.x, First.Name.x, Team.Code.y, Last.Name.y, First.Name.y, Class.y, Position.y, Name.y)) %>%
select(Player.Code, Name.x, Class.x, Position.x, Year, Drafted, everything())
# Remove the players that couldn't be matched with any season data
Class2013clean1 = Class2013clean[apply(Class2013clean[,7:ncol(Class2013clean)], 1, function(x) {!all(is.na(x))}),]
# Remove duplicated players (or players with the same name that could not be matched)
CleanClassYear = Class2013clean1[!(duplicated(Class2013clean1$Name.x)),]
# Remove all the NA's
CleanClassYear[is.na(CleanClassYear)] = 0
# Separate the tibble into a couple of pieces to bring them into the format we want at the end,
# which contains the first six cols once and then the results of the two seasons together and
# not separately like before
PlayerInfo = CleanClassYear[,1:6]
names(PlayerInfo) = substr(names(PlayerInfo), start = 1, stop = nchar(names(PlayerInfo))-2)
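# Note: the substr() above cuts the last two characters off every column name (to drop the ".x"/".y" suffixes),
# so unsuffixed names are shortened too, e.g. "Year" -> "Ye", "Drafted" -> "Draft", "Player.Code" -> "Player.Co";
# the mutate() calls below restore the full names before the truncated ones are dropped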
PlayerInfo1 = PlayerInfo %>%
mutate(Year = Ye) %>%
mutate(Drafted = Draft) %>%
mutate(Player.Code = Player.Co) %>%
select(-c("Draft", "Ye", "Player.Co"))
Season1 = CleanClassYear[,c(1, 7:30)]
names(Season1) = substr(names(Season1), start = 1, stop = nchar(names(Season1))-2)
Season2 = CleanClassYear[,c(1, 31:54)]
names(Season2) = substr(names(Season2), start = 1, stop = nchar(names(Season2))-2)
Seasons = rbind(Season1, Season2)
SeasonsGrp = Seasons %>%
group_by(., Player.Co) %>%
summarise_all(., sum) %>%
mutate(Player.Code = Player.Co) %>%
select(-Player.Co)
# Put the pieces together again
CleanClassYeartogether <- merge(x = PlayerInfo1, y = SeasonsGrp, by = "Player.Code")
return(CleanClassYeartogether)
}
save(getCleanClass2, file = "getCleanClass2.R")
<file_sep>/Project_Scripts/Data_Sampling_Smote.R
library(tidyverse)
library(smotefamily)
load("../Data/CleanData/CleanClass2007to2014_3.RData")
cleanData <- as_tibble(CleanClass2007to2014_3) %>% drop_na(.)
# SMOTE sampling ----
# Synthetic minority over-sampling technique (SMOTE) combats class imbalance in datasets used for machine learning
# by artificially inflating the smaller class. In order to achieve this, synthetic values are created for the
# minority class by placing them between existing values. This script uses the SMOTE() function from the "smotefamily"
# package which takes the parameters K = number of nearest neighbors taken into account during sampling; and dup_size =
# target factor for number of synthetic values of the minority class over the original number of majority instances (e.g.
# if dup_size = 2 and the original imbalance was 1/10 minority to majority instances, the resulting imbalance will be
# 2/10). To achieve a roughly balanced sample, the inverse of the original imbalance is passed as dup_size in this script.
# Preparing for the loops
cleanData_smote <- rep(NA, 29)
posit <- c("QB", "RB", "WR")
for (pos in posit) {
for (j in 1:7) {
# Drop variables that will not be replicated
cleanData_var <- cleanData %>% filter(., Year == 2006 + j, Position == pos) %>% drop_na(.) %>%
mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
# Perform SMOTE sampling on training data with the target of equally sized classes
set.seed(6969)
D_smote <- SMOTE(X = select(cleanData_var, -y),
target = cleanData_var$y,
K = 5, dup_size = length(which(cleanData_var$y == 0))/length(which(cleanData_var$y == 1)))
# Add the player names, codes, positions and years back to the non-synthetic values in the dataset
orig_all <- cleanData %>% filter(., Year == 2006 + j, Position == pos) %>% arrange(., desc(Drafted)) %>% select(., Player.Code, Name, Position, Year, Drafted)
orig_N <- D_smote$orig_N %>% mutate(., "Player.Code" = filter(orig_all, orig_all$Drafted == 0)$Player.Code,
"Name" = filter(orig_all, orig_all$Drafted == 0)$Name,
"Position" = filter(orig_all, orig_all$Drafted == 0)$Position,
"Year" = filter(orig_all, orig_all$Drafted == 0)$Year,
"Drafted" = class) %>%
select(., -class)
orig_P <- D_smote$orig_P %>% mutate(., "Player.Code" = filter(orig_all, orig_all$Drafted == 1)$Player.Code,
"Name" = filter(orig_all, orig_all$Drafted == 1)$Name,
"Position" = filter(orig_all, orig_all$Drafted == 1)$Position,
"Year" = filter(orig_all, orig_all$Drafted == 1)$Year,
"Drafted" = class) %>%
select(., -class)
# For the synthetic data, add "syn" as player code and name
syn_data <- D_smote$syn_data %>% mutate(., "Player.Code" = "syn",
"Name" = "syn",
"Position" = pos,
"Year" = 2006 + j,
"Drafted" = class) %>%
select(., -class)
# Combine the data
cleanData_smote <- rbind(cleanData_smote, orig_N, orig_P, syn_data)
}
}
# Remove the first row (which is NA) from the data
cleanData_smote <- cleanData_smote[-1,]
# Rearrange the data
cleanData_smote <- cleanData_smote %>% select(., Player.Code, Name, Position, Year, Drafted, everything()) %>%
arrange(., desc(Year))
# Save the SMOTE sampled data as a new dataset
save(cleanData_smote, file="../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
<file_sep>/Data/READMEs/RM_KNN.Rmd
---
title: "README for K-Nearest Neighbor (KNN)"
author: "Group 2"
date: "02 12 2019"
output:
pdf_document: default
html_document: default
word_document: default
---
**Corresponding R Script:** [KNN](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts)
# 1. Introduction
There are two classes of college football players (CFPs): drafted and not drafted to the NFL. A CFP profile consists of different features $x_{1} ... x_{n}$. To estimate whether a new CFP will be drafted to the NFL or not, we apply the K-nearest neighbor (KNN) algorithm to that profile's features. KNN is an algorithm that predicts each observation based on its "similarity" to other observations. The algorithm is memory-based, because the "learning" part consists of simply storing the training examples, which is also called lazy learning.
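As a minimal illustration of this lazy-learning idea (hypothetical toy data; the project itself uses `caret`, see Chapter 5), the `class` package classifies a new observation at prediction time by a majority vote among its $k$ nearest stored training examples:
```
library(class)
# Hypothetical toy data: two numeric features, two classes
set.seed(1)
train_x <- data.frame(x1 = c(rnorm(20, 0), rnorm(20, 3)),
                      x2 = c(rnorm(20, 0), rnorm(20, 3)))
train_y <- factor(rep(c("not drafted", "drafted"), each = 20))
new_obs <- data.frame(x1 = 2.5, x2 = 2.8)
# "Training" is just storing train_x/train_y; all the work happens inside knn()
knn(train = train_x, test = new_obs, cl = train_y, k = 5)
```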
# 2. Majority Decision and Euclidean Distance
KNN measures similarity in the sense that it identifies the $k$ observations that are nearest to the new CFP profile's features and then uses, in our context (since we classify CFPs), the most common class of those $k$ observations as the predicted output. The classification is thus based on a majority decision in which the above-mentioned $k$ nearest, already classified, observations participate. For two classes, as in our case, a tie in the majority decision can be prevented by choosing an odd $k$.
In order to determine the similarity between observations, a distance metric is used to compute the pairwise differences between them. The most common distance measure is the Euclidean distance, which is used in our case. Other commonly used distance measures also exist, such as the Manhattan or Minkowski distance, but they are not used here. The Euclidean distance measures the straight-line distance between two observations $x_{a}$ and $x_{b}$ over all $n$ features:
$$\sqrt{\sum_{j=1}^{n} (x_{aj}-x_{bj})^2}$$
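As a small numerical illustration (hypothetical feature vectors, not taken from the data), this distance can be computed directly in R:
```
# Two hypothetical player profiles with three features each
x_a <- c(Games.Played = 12, Rush.Yard = 850, Rush.TD = 9)
x_b <- c(Games.Played = 10, Rush.Yard = 400, Rush.TD = 3)
# Straight-line (Euclidean) distance over all features
sqrt(sum((x_a - x_b)^2))
# The same result using the built-in distance function
dist(rbind(x_a, x_b), method = "euclidean")
```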
# 3. Data Standardization
Due to the squaring, the Euclidean distance is sensitive to outliers and, furthermore, to the scale of the features. Features with different scales will bias the distance measures, as the predictors with the largest values contribute most to the distance between two observations. Because of these possible differences in the magnitudes of the feature values, we standardize the data. As we will show in Chapter 5, we use the z-score, which is a common standardization method: from each feature value we subtract the mean and divide by the standard deviation:
$$z=\frac{(x-\mu)}{\sigma}$$
The z-score is the number of standard deviations a value lies above or below the mean. Since we only have numeric features and no categorical ones for prediction (see [RM.DataHandling](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Data/READMEs)), we do not have to apply methods such as one-hot encoding to represent categorical features numerically.
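A minimal sketch of this standardization on hypothetical values (in the actual models this step is handled by `caret` via `preProcess = c("center", "scale")`, see Chapter 5):
```
# Hypothetical feature matrix: rows are players, columns are features
X <- rbind(c(12, 850, 9),
           c(10, 400, 3),
           c( 6, 120, 1))
colnames(X) <- c("Games.Played", "Rush.Yard", "Rush.TD")
# z-score: subtract the column mean and divide by the column standard deviation
Z <- scale(X, center = TRUE, scale = TRUE)
# Equivalent manual computation for the first column
(X[, 1] - mean(X[, 1])) / sd(X[, 1])
```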
# 5. Implementation in RStudio
The application of KNN in RStudio is explained below. Where individual code sections are analyzed in more detail, the code for the category `_tog` is shown as an example; it is also representative of the categories `_QB`, `_RB` and `_WR`.
## 5.1 Training KNN Models with 10-fold Cross Validation
For training we use the corresponding data from the years 2007 to 2013 with respect to the unsampled and sampled datasets. We train KNN models with 10-fold cross validation and therefore use the package `caret`, which is generally used for classification and regression training. By using `train()` we evaluate the accuracy of the KNN classifiers with different values of $k$ by 10-fold cross validation.
With `expand.grid()` we also provide a tuning grid for the values of $k$. The function `expand.grid()` returns a data frame, which the `tuneGrid` argument expects, so the KNN parameter $k$ can be tuned through that argument. By setting `k = seq(3, 25, by = 1)`, `train()` will try all integer values of $k$ between 3 and 25 and determine the $k$ for the KNN model that optimizes accuracy under 10-fold cross-validation.
```
tr_control <- trainControl(method="repeatedcv", number=10, repeats = 3)
hyper_grid <- expand.grid(k = seq(3, 25, by = 1))
set.seed(6969)
KNN_tog <- train(Drafted~.,
data=Data2007to2013_tog,
method="knn",
trControl=tr_control,
preProcess=c("center","scale"),
tuneGrid=hyper_grid)
```
For the probability cutoff, the default value of 0.5 is used in the following code. Furthermore, we store the predictions in a checklist; what we later do with the checklist is described in the script [KNN](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts).
```
predict_tog <- predict(KNN_tog,Data2007to2013_tog)
CheckList_tog = cbind.data.frame(Data2007to2013_tog$Drafted,predict_tog)
```
## 5.2 Choosing $k$
The following plots record the cross-validated results for the respective training data and positions, where integer values between 3 and 25 are assessed for $k$. (For better resolution, see the script [KNN](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts).)

The performance of the models is sensitive to the choice of $k$. With low values of $k$ a model typically overfits, and with large values it often underfits. At the extremes, when $k=1$, we would base the prediction on the single observation with the closest distance; when $k=n$, we would simply use the most common class across all training samples, in our context the non-drafted CFPs, as the predicted value. There is no general rule for the optimal $k$; it depends on the nature of the data. For high-signal data with few noisy or irrelevant features, smaller values of $k$ tend to work best; as more irrelevant features are present, larger values of $k$ are required to smooth out the noise. The $k$ values obtained in our case are listed in the table below:
\newpage
```{r, include=FALSE}
library(tidyverse)
load("RM_KNN_files/KNN_K.Rdata")
```
```{r echo = FALSE}
knitr::kable(KNN_K, caption = "k Values for KNN Models")
```
In our case the following considerations apply. A lot of different features are included in the models, and all of them are also used for the individual player positions `_QB`, `_RB` and `_WR`, even though, from an objective point of view, not all of these features seem suitable for measuring position-specific performance. This would argue for a larger $k$ to smooth out the noise. On the other hand, there are strong correlations between many variables, as can be seen in the [ReadMe](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Data/READMEs) and [Script](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts) for the Naive Bayes classifier. If these variables are good indicators, they can reinforce each other and would drive the classification relatively more strongly; this would argue for a rather lower $k$.
These questions do not need to be addressed further in our case. Due to our research question, only the best-performing model is tested on the data from 2014 and, in particular, there is no focus on the topic of "variable importance" here. Thus, determining and interpreting an optimal $k$ value remains a starting point for further projects.
<file_sep>/Data/READMEs/RM_ClassTree.Rmd
---
title: "README for the classification tree"
author: "Group 2"
date: "December 2nd, 2019"
output:
pdf_document:
fig_caption: yes
keep_tex: yes
html_document:
df_print: paged
abstract: This chapter describes the classification tree algorithm applied to the different data sets (all with '_3'; one unsampled and four sampled), coded in the script 'ClassificationTree.R'.
urlcolor: blue
---
## Fundamentals of Classification Trees
Classification trees split on the different variables in order to obtain the most homogeneous possible clusters by minimizing a loss function, whose growth can be restricted (this complexity parameter is called 'cp' in the rpart package). For every split, the algorithm computes the sum of the errors on both sides of the split for all variables and chooses the split with the lowest error.
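As a minimal, self-contained sketch of how such a tree is grown and how the complexity parameter restricts it (using a built-in example dataset rather than our draft data; the project's actual calls are in 'ClassificationTree.R'):
```
library(rpart)
# Grow a classification tree; cp sets the minimum improvement in fit a split must
# achieve before it is attempted, xval the number of cross-validation folds
fit <- rpart(Species ~ ., data = iris, method = "class",
             control = rpart.control(cp = 0.01, xval = 10))
# Cross-validated error for every complexity value considered while growing
printcp(fit)
# Prune back to the complexity with the lowest cross-validated error
best_cp <- fit$cptable[which.min(fit$cptable[, "xerror"]), "CP"]
pruned <- prune(fit, cp = best_cp)
```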
## Our Approach
As in the other models, we use all the information available just before the 2014 NFL Draft to train the model and then apply it to the data for 2014. In other words, we act as if it were the end of April 2014 (one week before the draft).
For growing trees on our college football / NFL Draft data, we check whether the best results can be obtained by manually splitting the data sets on the three positions (QB / WR / RB) or by letting the algorithm do that on its own. For growing the trees we use the rpart package, which is commonly used for this purpose, since it handles a lot on its own: when growing a tree it uses k-fold cross-validation (by default k=10) to optimize the model with respect to the best complexity and the spots to split. Therefore we do no further cross-validation on the data set.
The different trees that we grew are plotted with the fancyRpartPlot function from the rattle package in the Appendix at the end of this file. We cross-validate the sampling methods and therefore grow 20 trees in total, but we only display the four trees from the unsampled data frame. Since we use data with many variables and a couple of splits are made, the plots are not really readable; the aim of showing them is to visualize the complexity of the trees.
## Performance Measurement of the trees
We now want to have a look at the performance of the trees. In order to compare the results with the other models' performance, we decided to always use unsampled data for this. And since our business case is to predict the 2014 draft (as if it were the day before), we apply the models to the data from 2007 to 2013 to compare the classification errors.
```{r, include=FALSE}
library(tidyverse)
load("../PerformanceMeasurement/ClassificationTreePerfMeas.Rdata")
ClassificationTreePerfMeas1 = ClassificationTreePerfMeas %>%
select(-c(Method, Sampling))
PerfMeas = as.data.frame(t(ClassificationTreePerfMeas1))
names(PerfMeas) = c("No Sampling", "Oversampling", "Undersampling", "Rose Both", "Smote")
PerfMeasPerc = data.frame(Sampling = ClassificationTreePerfMeas$Sampling, QB = NA, WR = NA, RB = NA, Together = NA, stringsAsFactors = FALSE)
for(i in 1:nrow(ClassificationTreePerfMeas)){
PerfMeasPerc$QB[i] = (ClassificationTreePerfMeas$QB_TP[i] + ClassificationTreePerfMeas$QB_TN[i])/(ClassificationTreePerfMeas$QB_TP[i] + ClassificationTreePerfMeas$QB_TN[i] + ClassificationTreePerfMeas$QB_FN[i] + ClassificationTreePerfMeas$QB_FP[i])
PerfMeasPerc$WR[i] = (ClassificationTreePerfMeas$WR_TP[i] + ClassificationTreePerfMeas$WR_TN[i])/(ClassificationTreePerfMeas$WR_TP[i] + ClassificationTreePerfMeas$WR_TN[i] + ClassificationTreePerfMeas$WR_FN[i] + ClassificationTreePerfMeas$WR_FP[i])
PerfMeasPerc$RB[i] = (ClassificationTreePerfMeas$RB_TP[i] + ClassificationTreePerfMeas$RB_TN[i])/(ClassificationTreePerfMeas$RB_TP[i] + ClassificationTreePerfMeas$RB_TN[i] + ClassificationTreePerfMeas$RB_FN[i] + ClassificationTreePerfMeas$RB_FP[i])
PerfMeasPerc$Together[i] = (ClassificationTreePerfMeas$Together_TP[i] + ClassificationTreePerfMeas$Together_TN[i])/(ClassificationTreePerfMeas$Together_TP[i] + ClassificationTreePerfMeas$Together_TN[i] + ClassificationTreePerfMeas$Together_FN[i] + ClassificationTreePerfMeas$Together_FP[i])
}
load("../PerformanceMeasurement/ClassificationTreePerfMeas14.Rdata")
ClassificationTreePerfMeas14_1 = ClassificationTreePerfMeas14 %>%
select(-c(Method, Sampling))
PerfMeas14 = as.data.frame(t(ClassificationTreePerfMeas14_1))
names(PerfMeas14) = c("No Sampling", "Oversampling", "Undersampling", "Rose Both", "Smote")
PerfMeasPerc14 = data.frame(Sampling = ClassificationTreePerfMeas14$Sampling, QB = NA, WR = NA, RB = NA, Together = NA, stringsAsFactors = FALSE)
for(i in 1:nrow(ClassificationTreePerfMeas14)){
PerfMeasPerc14$QB[i] = (ClassificationTreePerfMeas14$QB_TP[i] + ClassificationTreePerfMeas14$QB_TN[i])/(ClassificationTreePerfMeas14$QB_TP[i] + ClassificationTreePerfMeas14$QB_TN[i] + ClassificationTreePerfMeas14$QB_FN[i] + ClassificationTreePerfMeas14$QB_FP[i])
PerfMeasPerc14$WR[i] = (ClassificationTreePerfMeas14$WR_TP[i] + ClassificationTreePerfMeas14$WR_TN[i])/(ClassificationTreePerfMeas14$WR_TP[i] + ClassificationTreePerfMeas14$WR_TN[i] + ClassificationTreePerfMeas14$WR_FN[i] + ClassificationTreePerfMeas14$WR_FP[i])
PerfMeasPerc14$RB[i] = (ClassificationTreePerfMeas14$RB_TP[i] + ClassificationTreePerfMeas14$RB_TN[i])/(ClassificationTreePerfMeas14$RB_TP[i] + ClassificationTreePerfMeas14$RB_TN[i] + ClassificationTreePerfMeas14$RB_FN[i] + ClassificationTreePerfMeas14$RB_FP[i])
PerfMeasPerc14$Together[i] = (ClassificationTreePerfMeas14$Together_TP[i] + ClassificationTreePerfMeas14$Together_TN[i])/(ClassificationTreePerfMeas14$Together_TP[i] + ClassificationTreePerfMeas14$Together_TN[i] + ClassificationTreePerfMeas14$Together_FN[i] + ClassificationTreePerfMeas14$Together_FP[i])
}
```
```{r echo = FALSE}
knitr::kable(PerfMeas, caption = "True Positives/Negatives and False Positives/Negatives of the trees for the different models on training data")
knitr::kable(PerfMeas14, caption = "True Positives/Negatives and False Positives/Negatives of the trees for the different models on testing data")
```
In order to compare them better, we will now have a look at the ratio of correct classifications, which is equal to:
$$\frac{\text{Correct Classifications}}{\text{All Classifications}} = \frac{TP + TN}{TP+TN+FP+FN}$$
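A small helper along these lines (hypothetical function name and counts, not part of the project scripts) computes this ratio from the four confusion-matrix counts:
```
# Accuracy = (TP + TN) / (TP + TN + FP + FN)
classification_accuracy <- function(TP, TN, FP, FN) {
  (TP + TN) / (TP + TN + FP + FN)
}
# Hypothetical example counts
classification_accuracy(TP = 30, TN = 400, FP = 20, FN = 15)
```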
```{r echo = FALSE}
knitr::kable(PerfMeasPerc, caption = "Percentage of right classifications on training data", digits=4)
```
\newpage
```{r echo = FALSE}
knitr::kable(PerfMeasPerc14, caption = "Percentage of right classifications on testing data", digits=4)
```
## Conclusion
As we can see, the models for the manually separated positions mostly perform better than the model for QB/WR/RB together. The bigger effect, however, is that sampling reduces the accuracy of the models considerably. Since we have 12.4% of drafted players in the data, a model that always predicts 0 (= not drafted) would outperform the models trained on sampled data.
Within the classification tree models, the highest accuracy was obtained by using the three models for the manually separated positions, with a weighted average accuracy of 91.74%. Keeping in mind that a model that always predicts '0' would also reach an accuracy of 87.6%, this performance, which looks very good at first sight, is only a rather small improvement.
## Appendix
\newpage
{#id .class width=50%}
{#id .class width=50%}
{#id .class width=50%}
{#id .class width=50%}
<file_sep>/Project_Scripts/LogisticRegression.R
rm(list=ls())
graphics.off()
library(tidyverse)
library(boot)
library(caret)
### Preparations ---------
## Data for 2007 to 2014
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
no_samplingData = as_tibble(CleanClass2007to2014_3)
predict2007to2013_tog = no_samplingData %>%
filter(Year != 2014)
predict2007to2013_QB = predict2007to2013_tog %>% filter(Position == "QB")
predict2007to2013_WR = predict2007to2013_tog %>% filter(Position == "WR")
predict2007to2013_RB = predict2007to2013_tog %>% filter(Position == "RB")
predict2014_tog = no_samplingData %>%
filter(Year == 2014)
predict2014_QB = predict2014_tog %>% filter(Position == "QB")
predict2014_WR = predict2014_tog %>% filter(Position == "WR")
predict2014_RB = predict2014_tog %>% filter(Position == "RB")
# oversampling
load("../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
oversamplingData = as_tibble(CleanClass2007to2014_3_oversampling)
# undersampling
load("../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
undersamplingData = CleanClass2007to2014_3_undersampling
# Rose_both
load("../Data/CleanData/CleanClass2007to2013_3_Rose.both.Rdata")
Rose_bothData = CleanClass2007to2014_3_Rose.both
# Smote
load("../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
SmoteData = cleanData_smote
## matrices for performance measurement
LogisticRegressionPerfMeas = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
LogisticRegressionPerfMeas[1:5,1] = "LogisticRegression"
LogisticRegressionPerfMeas[1:5,2] = c("no_sampling", "oversampling", "undersampling", "Rose_both", "Smote")
LogisticRegressionPerfMeas2014 = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
LogisticRegressionPerfMeas2014[1:5,1] = "LogisticRegression"
LogisticRegressionPerfMeas2014[1:5,2] = c("no_sampling", "oversampling", "undersampling", "Rose_both", "Smote")
# The following steps are repeated 5 times, once for each of the data samples loaded above.
# 1. No Sampling ---------
### Logistic Regression for all players together --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing. We chose this approach in order to simulate our business case. We want to predict the outcome
# of the upcoming Draft (in our case the one of 2014) based on all the data we have from the previous years.
Data_tog_train = no_samplingData %>%
filter(Year != 2014) %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training data, including a 10-fold cross-validation
model_logit_tog = train(Drafted ~ .,
data = Data_tog_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
Together_Pred = ifelse(predict(model_logit_tog, predict2007to2013_tog)>0.5, 1, 0)
CheckList_train_tog_1 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[1,"Together_TP"] = sum(CheckList_train_tog_1$Together_TP)
LogisticRegressionPerfMeas[1,"Together_TN"] = sum(CheckList_train_tog_1$Together_TN)
LogisticRegressionPerfMeas[1,"Together_FP"] = sum(CheckList_train_tog_1$Together_FP)
LogisticRegressionPerfMeas[1,"Together_FN"] = sum(CheckList_train_tog_1$Together_FN)
# testing error
Together_Pred = ifelse(predict(model_logit_tog, newdata = predict2014_tog)>0.5, 1, 0)
CheckList_test_tog_1 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[1,"Together_TP"] = sum(CheckList_test_tog_1$Together_TP)
LogisticRegressionPerfMeas2014[1,"Together_TN"] = sum(CheckList_test_tog_1$Together_TN)
LogisticRegressionPerfMeas2014[1,"Together_FP"] = sum(CheckList_test_tog_1$Together_FP)
LogisticRegressionPerfMeas2014[1,"Together_FN"] = sum(CheckList_test_tog_1$Together_FN)
### Logistic Regression for QBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_QB_train = no_samplingData %>%
filter(Year != 2014) %>%
filter(Position == "QB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training set, including a 10-fold cross-validation
model_logit_QB <- train(Drafted ~ .,
data = Data_QB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
QB_Pred = ifelse(predict(model_logit_QB, predict2007to2013_QB)>0.5, 1, 0)
CheckList_train_QB_1 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[1,"QB_TP"] = sum(CheckList_train_QB_1$QB_TP)
LogisticRegressionPerfMeas[1,"QB_TN"] = sum(CheckList_train_QB_1$QB_TN)
LogisticRegressionPerfMeas[1,"QB_FP"] = sum(CheckList_train_QB_1$QB_FP)
LogisticRegressionPerfMeas[1,"QB_FN"] = sum(CheckList_train_QB_1$QB_FN)
# testing error
QB_Pred = ifelse(predict(model_logit_QB, newdata = predict2014_QB)>0.5, 1, 0)
CheckList_test_QB_1 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[1,"QB_TP"] = sum(CheckList_test_QB_1$QB_TP)
LogisticRegressionPerfMeas2014[1,"QB_TN"] = sum(CheckList_test_QB_1$QB_TN)
LogisticRegressionPerfMeas2014[1,"QB_FP"] = sum(CheckList_test_QB_1$QB_FP)
LogisticRegressionPerfMeas2014[1,"QB_FN"] = sum(CheckList_test_QB_1$QB_FN)
### Logistic Regression for WRs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_WR_train = no_samplingData %>%
filter(Year != 2014) %>%
filter(Position == "WR") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_WR <- train(Drafted ~ .,
data = Data_WR_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
WR_Pred = ifelse(predict(model_logit_WR, predict2007to2013_WR)>0.5, 1, 0)
CheckList_train_WR_1 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[1,"WR_TP"] = sum(CheckList_train_WR_1$WR_TP)
LogisticRegressionPerfMeas[1,"WR_TN"] = sum(CheckList_train_WR_1$WR_TN)
LogisticRegressionPerfMeas[1,"WR_FP"] = sum(CheckList_train_WR_1$WR_FP)
LogisticRegressionPerfMeas[1,"WR_FN"] = sum(CheckList_train_WR_1$WR_FN)
# testing error
WR_Pred = ifelse(predict(model_logit_WR, newdata = predict2014_WR)>0.5, 1, 0)
CheckList_test_WR_1 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[1,"WR_TP"] = sum(CheckList_test_WR_1$WR_TP)
LogisticRegressionPerfMeas2014[1,"WR_TN"] = sum(CheckList_test_WR_1$WR_TN)
LogisticRegressionPerfMeas2014[1,"WR_FP"] = sum(CheckList_test_WR_1$WR_FP)
LogisticRegressionPerfMeas2014[1,"WR_FN"] = sum(CheckList_test_WR_1$WR_FN)
### Logistic Regression for RBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_RB_train = no_samplingData %>%
filter(Year != 2014) %>%
filter(Position == "RB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on training data, including a 10-fold cross-validation
model_logit_RB <- train(Drafted ~ .,
data = Data_RB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
RB_Pred = ifelse(predict(model_logit_RB, predict2007to2013_RB)>0.5, 1, 0)
CheckList_train_RB_1 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[1,"RB_TP"] = sum(CheckList_train_RB_1$RB_TP)
LogisticRegressionPerfMeas[1,"RB_TN"] = sum(CheckList_train_RB_1$RB_TN)
LogisticRegressionPerfMeas[1,"RB_FP"] = sum(CheckList_train_RB_1$RB_FP)
LogisticRegressionPerfMeas[1,"RB_FN"] = sum(CheckList_train_RB_1$RB_FN)
# testing error
RB_Pred = ifelse(predict(model_logit_RB, newdata = predict2014_RB)>0.5, 1, 0)
CheckList_test_RB_1 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[1,"RB_TP"] = sum(CheckList_test_RB_1$RB_TP)
LogisticRegressionPerfMeas2014[1,"RB_TN"] = sum(CheckList_test_RB_1$RB_TN)
LogisticRegressionPerfMeas2014[1,"RB_FP"] = sum(CheckList_test_RB_1$RB_FP)
LogisticRegressionPerfMeas2014[1,"RB_FN"] = sum(CheckList_test_RB_1$RB_FN)
# 2. Oversampling ---------
### Logistic Regression for all players together --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_tog_train = oversamplingData %>%
filter(Year != 2014) %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training data, including a 10-fold cross-validation
model_logit_tog = train(Drafted ~ .,
data = Data_tog_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
Together_Pred = ifelse(predict(model_logit_tog, predict2007to2013_tog)>0.5, 1, 0)
CheckList_train_tog_2 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[2,"Together_TP"] = sum(CheckList_train_tog_2$Together_TP)
LogisticRegressionPerfMeas[2,"Together_TN"] = sum(CheckList_train_tog_2$Together_TN)
LogisticRegressionPerfMeas[2,"Together_FP"] = sum(CheckList_train_tog_2$Together_FP)
LogisticRegressionPerfMeas[2,"Together_FN"] = sum(CheckList_train_tog_2$Together_FN)
# testing error
Together_Pred = ifelse(predict(model_logit_tog, newdata = predict2014_tog)>0.5, 1, 0)
CheckList_test_tog_2 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[2,"Together_TP"] = sum(CheckList_test_tog_2$Together_TP)
LogisticRegressionPerfMeas2014[2,"Together_TN"] = sum(CheckList_test_tog_2$Together_TN)
LogisticRegressionPerfMeas2014[2,"Together_FP"] = sum(CheckList_test_tog_2$Together_FP)
LogisticRegressionPerfMeas2014[2,"Together_FN"] = sum(CheckList_test_tog_2$Together_FN)
### Logistic Regression for QBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_QB_train = oversamplingData %>%
filter(Year != 2014) %>%
filter(Position == "QB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training set, including a 10-fold cross-validation
model_logit_QB <- train(Drafted ~ .,
data = Data_QB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
QB_Pred = ifelse(predict(model_logit_QB, predict2007to2013_QB)>0.5, 1, 0)
CheckList_train_QB_2 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[2,"QB_TP"] = sum(CheckList_train_QB_2$QB_TP)
LogisticRegressionPerfMeas[2,"QB_TN"] = sum(CheckList_train_QB_2$QB_TN)
LogisticRegressionPerfMeas[2,"QB_FP"] = sum(CheckList_train_QB_2$QB_FP)
LogisticRegressionPerfMeas[2,"QB_FN"] = sum(CheckList_train_QB_2$QB_FN)
# testing error
QB_Pred = ifelse(predict(model_logit_QB, newdata = predict2014_QB)>0.5, 1, 0)
CheckList_test_QB_2 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[2,"QB_TP"] = sum(CheckList_test_QB_2$QB_TP)
LogisticRegressionPerfMeas2014[2,"QB_TN"] = sum(CheckList_test_QB_2$QB_TN)
LogisticRegressionPerfMeas2014[2,"QB_FP"] = sum(CheckList_test_QB_2$QB_FP)
LogisticRegressionPerfMeas2014[2,"QB_FN"] = sum(CheckList_test_QB_2$QB_FN)
### Logistic Regression for WRs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_WR_train = oversamplingData %>%
filter(Year != 2014) %>%
filter(Position == "WR") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_WR <- train(Drafted ~ .,
data = Data_WR_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
WR_Pred = ifelse(predict(model_logit_WR, predict2007to2013_WR)>0.5, 1, 0)
CheckList_train_WR_2 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[2,"WR_TP"] = sum(CheckList_train_WR_2$WR_TP)
LogisticRegressionPerfMeas[2,"WR_TN"] = sum(CheckList_train_WR_2$WR_TN)
LogisticRegressionPerfMeas[2,"WR_FP"] = sum(CheckList_train_WR_2$WR_FP)
LogisticRegressionPerfMeas[2,"WR_FN"] = sum(CheckList_train_WR_2$WR_FN)
# testing error
WR_Pred = ifelse(predict(model_logit_WR, newdata = predict2014_WR)>0.5, 1, 0)
CheckList_test_WR_2 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[2,"WR_TP"] = sum(CheckList_test_WR_2$WR_TP)
LogisticRegressionPerfMeas2014[2,"WR_TN"] = sum(CheckList_test_WR_2$WR_TN)
LogisticRegressionPerfMeas2014[2,"WR_FP"] = sum(CheckList_test_WR_2$WR_FP)
LogisticRegressionPerfMeas2014[2,"WR_FN"] = sum(CheckList_test_WR_2$WR_FN)
### Logistic Regression for RBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_RB_train = oversamplingData %>%
filter(Year != 2014) %>%
filter(Position == "RB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on training data, including a 10-fold cross-validation
model_logit_RB <- train(Drafted ~ .,
data = Data_RB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
RB_Pred = ifelse(predict(model_logit_RB, predict2007to2013_RB)>0.5, 1, 0)
CheckList_train_RB_2 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[2,"RB_TP"] = sum(CheckList_train_RB_2$RB_TP)
LogisticRegressionPerfMeas[2,"RB_TN"] = sum(CheckList_train_RB_2$RB_TN)
LogisticRegressionPerfMeas[2,"RB_FP"] = sum(CheckList_train_RB_2$RB_FP)
LogisticRegressionPerfMeas[2,"RB_FN"] = sum(CheckList_train_RB_2$RB_FN)
# testing error
RB_Pred = ifelse(predict(model_logit_RB, newdata = predict2014_RB)>0.5, 1, 0)
CheckList_test_RB_2 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[2,"RB_TP"] = sum(CheckList_test_RB_2$RB_TP)
LogisticRegressionPerfMeas2014[2,"RB_TN"] = sum(CheckList_test_RB_2$RB_TN)
LogisticRegressionPerfMeas2014[2,"RB_FP"] = sum(CheckList_test_RB_2$RB_FP)
LogisticRegressionPerfMeas2014[2,"RB_FN"] = sum(CheckList_test_RB_2$RB_FN)
# 3. Undersampling ---------
### Logistic Regression for all players together --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_tog_train = undersamplingData %>%
filter(Year != 2014) %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training data, including a 10-fold cross-validation
model_logit_tog = train(Drafted ~ .,
data = Data_tog_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
Together_Pred = ifelse(predict(model_logit_tog, predict2007to2013_tog)>0.5, 1, 0)
CheckList_train_tog_3 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[3,"Together_TP"] = sum(CheckList_train_tog_3$Together_TP)
LogisticRegressionPerfMeas[3,"Together_TN"] = sum(CheckList_train_tog_3$Together_TN)
LogisticRegressionPerfMeas[3,"Together_FP"] = sum(CheckList_train_tog_3$Together_FP)
LogisticRegressionPerfMeas[3,"Together_FN"] = sum(CheckList_train_tog_3$Together_FN)
# testing error
Together_Pred = ifelse(predict(model_logit_tog, newdata = predict2014_tog)>0.5, 1, 0)
CheckList_test_tog_3 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[3,"Together_TP"] = sum(CheckList_test_tog_3$Together_TP)
LogisticRegressionPerfMeas2014[3,"Together_TN"] = sum(CheckList_test_tog_3$Together_TN)
LogisticRegressionPerfMeas2014[3,"Together_FP"] = sum(CheckList_test_tog_3$Together_FP)
LogisticRegressionPerfMeas2014[3,"Together_FN"] = sum(CheckList_test_tog_3$Together_FN)
### Logistic Regression for QBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_QB_train = undersamplingData %>%
filter(Year != 2014) %>%
filter(Position == "QB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training set, including a 10-fold cross-validation
model_logit_QB <- train(Drafted ~ .,
data = Data_QB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
QB_Pred = ifelse(predict(model_logit_QB, predict2007to2013_QB)>0.5, 1, 0)
CheckList_train_QB_3 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[3,"QB_TP"] = sum(CheckList_train_QB_3$QB_TP)
LogisticRegressionPerfMeas[3,"QB_TN"] = sum(CheckList_train_QB_3$QB_TN)
LogisticRegressionPerfMeas[3,"QB_FP"] = sum(CheckList_train_QB_3$QB_FP)
LogisticRegressionPerfMeas[3,"QB_FN"] = sum(CheckList_train_QB_3$QB_FN)
# testing error
QB_Pred = ifelse(predict(model_logit_QB, newdata = predict2014_QB)>0.5, 1, 0)
CheckList_test_QB_3 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[3,"QB_TP"] = sum(CheckList_test_QB_3$QB_TP)
LogisticRegressionPerfMeas2014[3,"QB_TN"] = sum(CheckList_test_QB_3$QB_TN)
LogisticRegressionPerfMeas2014[3,"QB_FP"] = sum(CheckList_test_QB_3$QB_FP)
LogisticRegressionPerfMeas2014[3,"QB_FN"] = sum(CheckList_test_QB_3$QB_FN)
### Logistic Regression for WRs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_WR_train = undersamplingData %>%
filter(Year != 2014) %>%
filter(Position == "WR") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_WR <- train(Drafted ~ .,
data = Data_WR_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
WR_Pred = ifelse(predict(model_logit_WR, predict2007to2013_WR)>0.5, 1, 0)
CheckList_train_WR_3 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[3,"WR_TP"] = sum(CheckList_train_WR_3$WR_TP)
LogisticRegressionPerfMeas[3,"WR_TN"] = sum(CheckList_train_WR_3$WR_TN)
LogisticRegressionPerfMeas[3,"WR_FP"] = sum(CheckList_train_WR_3$WR_FP)
LogisticRegressionPerfMeas[3,"WR_FN"] = sum(CheckList_train_WR_3$WR_FN)
# testing error
WR_Pred = ifelse(predict(model_logit_WR, newdata = predict2014_WR)>0.5, 1, 0)
CheckList_test_WR_3 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[3,"WR_TP"] = sum(CheckList_test_WR_3$WR_TP)
LogisticRegressionPerfMeas2014[3,"WR_TN"] = sum(CheckList_test_WR_3$WR_TN)
LogisticRegressionPerfMeas2014[3,"WR_FP"] = sum(CheckList_test_WR_3$WR_FP)
LogisticRegressionPerfMeas2014[3,"WR_FN"] = sum(CheckList_test_WR_3$WR_FN)
### Logistic Regression for RBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_RB_train = undersamplingData %>%
filter(Year != 2014) %>%
filter(Position == "RB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on training data, including a 10-fold cross-validation
model_logit_RB <- train(Drafted ~ .,
data = Data_RB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
RB_Pred = ifelse(predict(model_logit_RB, predict2007to2013_RB)>0.5, 1, 0)
CheckList_train_RB_3 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[3,"RB_TP"] = sum(CheckList_train_RB_3$RB_TP)
LogisticRegressionPerfMeas[3,"RB_TN"] = sum(CheckList_train_RB_3$RB_TN)
LogisticRegressionPerfMeas[3,"RB_FP"] = sum(CheckList_train_RB_3$RB_FP)
LogisticRegressionPerfMeas[3,"RB_FN"] = sum(CheckList_train_RB_3$RB_FN)
# testing error
RB_Pred = ifelse(predict(model_logit_RB, newdata = predict2014_RB)>0.5, 1, 0)
CheckList_test_RB_3 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[3,"RB_TP"] = sum(CheckList_test_RB_3$RB_TP)
LogisticRegressionPerfMeas2014[3,"RB_TN"] = sum(CheckList_test_RB_3$RB_TN)
LogisticRegressionPerfMeas2014[3,"RB_FP"] = sum(CheckList_test_RB_3$RB_FP)
LogisticRegressionPerfMeas2014[3,"RB_FN"] = sum(CheckList_test_RB_3$RB_FN)
# 4. Rose_both ---------
### Logistic Regression for all players together --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_tog_train = Rose_bothData %>%
filter(Year != 2014) %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training data, including a 10-fold cross-validation
model_logit_tog = train(Drafted ~ .,
data = Data_tog_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
Together_Pred = ifelse(predict(model_logit_tog, predict2007to2013_tog)>0.5, 1, 0)
CheckList_train_tog_4 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2007to2013_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[4,"Together_TP"] = sum(CheckList_train_tog_4$Together_TP)
LogisticRegressionPerfMeas[4,"Together_TN"] = sum(CheckList_train_tog_4$Together_TN)
LogisticRegressionPerfMeas[4,"Together_FP"] = sum(CheckList_train_tog_4$Together_FP)
LogisticRegressionPerfMeas[4,"Together_FN"] = sum(CheckList_train_tog_4$Together_FN)
# testing error
Together_Pred = ifelse(predict(model_logit_tog, newdata = predict2014_tog)>0.5, 1, 0)
CheckList_test_tog_4 = tibble("Together_Pred" = Together_Pred,
"Together_TP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(Together_Pred == 1 & predict2014_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(Together_Pred == 0 & predict2014_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[4,"Together_TP"] = sum(CheckList_test_tog_4$Together_TP)
LogisticRegressionPerfMeas2014[4,"Together_TN"] = sum(CheckList_test_tog_4$Together_TN)
LogisticRegressionPerfMeas2014[4,"Together_FP"] = sum(CheckList_test_tog_4$Together_FP)
LogisticRegressionPerfMeas2014[4,"Together_FN"] = sum(CheckList_test_tog_4$Together_FN)
### Logistic Regression for QBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_QB_train = Rose_bothData %>%
filter(Year != 2014) %>%
filter(Position == "QB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## training the model on the training set, including a 10-fold cross-validation
model_logit_QB <- train(Drafted ~ .,
data = Data_QB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
QB_Pred = ifelse(predict(model_logit_QB, predict2007to2013_QB)>0.5, 1, 0)
CheckList_train_QB_4 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 &predict2007to2013_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[4,"QB_TP"] = sum(CheckList_train_QB_4$QB_TP)
LogisticRegressionPerfMeas[4,"QB_TN"] = sum(CheckList_train_QB_4$QB_TN)
LogisticRegressionPerfMeas[4,"QB_FP"] = sum(CheckList_train_QB_4$QB_FP)
LogisticRegressionPerfMeas[4,"QB_FN"] = sum(CheckList_train_QB_4$QB_FN)
# testing error
QB_Pred = ifelse(predict(model_logit_QB, newdata = predict2014_QB)>0.5, 1, 0)
CheckList_test_QB_4 = tibble("QB_Pred" = QB_Pred,
"QB_TP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(QB_Pred == 1 & predict2014_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(QB_Pred == 0 & predict2014_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[4,"QB_TP"] = sum(CheckList_test_QB_4$QB_TP)
LogisticRegressionPerfMeas2014[4,"QB_TN"] = sum(CheckList_test_QB_4$QB_TN)
LogisticRegressionPerfMeas2014[4,"QB_FP"] = sum(CheckList_test_QB_4$QB_FP)
LogisticRegressionPerfMeas2014[4,"QB_FN"] = sum(CheckList_test_QB_4$QB_FN)
### Logistic Regression for WRs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_WR_train = Rose_bothData %>%
filter(Year != 2014) %>%
filter(Position == "WR") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_WR <- train(Drafted ~ .,
data = Data_WR_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
WR_Pred = ifelse(predict(model_logit_WR, predict2007to2013_WR)>0.5, 1, 0)
CheckList_train_WR_4 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 &predict2007to2013_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[4,"WR_TP"] = sum(CheckList_train_WR_4$WR_TP)
LogisticRegressionPerfMeas[4,"WR_TN"] = sum(CheckList_train_WR_4$WR_TN)
LogisticRegressionPerfMeas[4,"WR_FP"] = sum(CheckList_train_WR_4$WR_FP)
LogisticRegressionPerfMeas[4,"WR_FN"] = sum(CheckList_train_WR_4$WR_FN)
# testing error
WR_Pred = ifelse(predict(model_logit_WR, newdata = predict2014_WR)>0.5, 1, 0)
CheckList_test_WR_4 = tibble("WR_Pred" = WR_Pred,
"WR_TP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(WR_Pred == 1 & predict2014_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(WR_Pred == 0 & predict2014_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[4,"WR_TP"] = sum(CheckList_test_WR_4$WR_TP)
LogisticRegressionPerfMeas2014[4,"WR_TN"] = sum(CheckList_test_WR_4$WR_TN)
LogisticRegressionPerfMeas2014[4,"WR_FP"] = sum(CheckList_test_WR_4$WR_FP)
LogisticRegressionPerfMeas2014[4,"WR_FN"] = sum(CheckList_test_WR_4$WR_FN)
### Logistic Regression for RBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_RB_train = Rose_bothData %>%
filter(Year != 2014) %>%
filter(Position == "RB") %>%
select(-Class, -Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_RB <- train(Drafted ~ .,
data = Data_RB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
RB_Pred = ifelse(predict(model_logit_RB, predict2007to2013_RB)>0.5, 1, 0)
CheckList_train_RB_4 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 &predict2007to2013_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[4,"RB_TP"] = sum(CheckList_train_RB_4$RB_TP)
LogisticRegressionPerfMeas[4,"RB_TN"] = sum(CheckList_train_RB_4$RB_TN)
LogisticRegressionPerfMeas[4,"RB_FP"] = sum(CheckList_train_RB_4$RB_FP)
LogisticRegressionPerfMeas[4,"RB_FN"] = sum(CheckList_train_RB_4$RB_FN)
# testing error
RB_Pred = ifelse(predict(model_logit_RB, newdata = predict2014_RB)>0.5, 1, 0)
CheckList_test_RB_4 = tibble("RB_Pred" = RB_Pred,
"RB_TP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(RB_Pred == 1 & predict2014_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(RB_Pred == 0 & predict2014_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[4,"RB_TP"] = sum(CheckList_test_RB_4$RB_TP)
LogisticRegressionPerfMeas2014[4,"RB_TN"] = sum(CheckList_test_RB_4$RB_TN)
LogisticRegressionPerfMeas2014[4,"RB_FP"] = sum(CheckList_test_RB_4$RB_FP)
LogisticRegressionPerfMeas2014[4,"RB_FN"] = sum(CheckList_test_RB_4$RB_FN)
# 5. Smote ---------
### Logistic Regression for all players together --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_tog_train = SmoteData %>%
filter(Year != 2014) %>%
select(-Position, -Name, -Player.Code, -Year)
## training the model on the training data, including a 10-fold cross-validation
model_logit_tog = train(Drafted ~ .,
data = Data_tog_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
pred = predict(model_logit_tog, predict2007to2013_tog)
CheckList_train_tog_5 = tibble("Together_Pred" = pred,
"Together_TP" = ifelse(pred == 1 & predict2007to2013_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(pred == 1 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(pred == 0 & predict2007to2013_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(pred == 0 & predict2007to2013_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[5,"Together_TP"] = sum(CheckList_train_tog_5$Together_TP)
LogisticRegressionPerfMeas[5,"Together_TN"] = sum(CheckList_train_tog_5$Together_TN)
LogisticRegressionPerfMeas[5,"Together_FP"] = sum(CheckList_train_tog_5$Together_FP)
LogisticRegressionPerfMeas[5,"Together_FN"] = sum(CheckList_train_tog_5$Together_FN)
# testing error
pred = predict(model_logit_tog, newdata = predict2014_tog)
CheckList_test_tog_5 = tibble("Together_Pred" = pred,
"Together_TP" = ifelse(pred == 1 & predict2014_tog$Drafted == 1, 1, 0),
"Together_FP" = ifelse(pred == 1 & predict2014_tog$Drafted == 0, 1, 0),
"Together_TN" = ifelse(pred == 0 & predict2014_tog$Drafted == 0, 1, 0),
"Together_FN" = ifelse(pred == 0 & predict2014_tog$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[5,"Together_TP"] = sum(CheckList_test_tog_5$Together_TP)
LogisticRegressionPerfMeas2014[5,"Together_TN"] = sum(CheckList_test_tog_5$Together_TN)
LogisticRegressionPerfMeas2014[5,"Together_FP"] = sum(CheckList_test_tog_5$Together_FP)
LogisticRegressionPerfMeas2014[5,"Together_FN"] = sum(CheckList_test_tog_5$Together_FN)
### Logistic Regression for QBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_QB_train = SmoteData %>%
filter(Year != 2014) %>%
filter(Position == "QB") %>%
select(-Position, -Name, -Player.Code, -Year)
## training the model on the training set, including a 10-fold cross-validation
model_logit_QB <- train(Drafted ~ .,
data = Data_QB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
pred = predict(model_logit_QB, newdata = predict2007to2013_QB)
CheckList_train_QB_5 = tibble("QB_Pred" = pred,
"QB_TP" = ifelse(pred == 1 &predict2007to2013_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(pred == 1 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(pred == 0 &predict2007to2013_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(pred == 0 &predict2007to2013_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[5,"QB_TP"] = sum(CheckList_train_QB_5$QB_TP)
LogisticRegressionPerfMeas[5,"QB_TN"] = sum(CheckList_train_QB_5$QB_TN)
LogisticRegressionPerfMeas[5,"QB_FP"] = sum(CheckList_train_QB_5$QB_FP)
LogisticRegressionPerfMeas[5,"QB_FN"] = sum(CheckList_train_QB_5$QB_FN)
# testing error
pred = predict(model_logit_QB, newdata = predict2014_QB)
CheckList_test_QB_5 = tibble("QB_Pred" = pred,
"QB_TP" = ifelse(pred == 1 & predict2014_QB$Drafted == 1, 1, 0),
"QB_FP" = ifelse(pred == 1 & predict2014_QB$Drafted == 0, 1, 0),
"QB_TN" = ifelse(pred == 0 & predict2014_QB$Drafted == 0, 1, 0),
"QB_FN" = ifelse(pred == 0 & predict2014_QB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[5,"QB_TP"] = sum(CheckList_test_QB_5$QB_TP)
LogisticRegressionPerfMeas2014[5,"QB_TN"] = sum(CheckList_test_QB_5$QB_TN)
LogisticRegressionPerfMeas2014[5,"QB_FP"] = sum(CheckList_test_QB_5$QB_FP)
LogisticRegressionPerfMeas2014[5,"QB_FN"] = sum(CheckList_test_QB_5$QB_FN)
### Logistic Regression for WRs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_WR_train = SmoteData %>%
filter(Year != 2014) %>%
filter(Position == "WR") %>%
select(-Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_WR <- train(Drafted ~ .,
data = Data_WR_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
pred = predict(model_logit_WR, newdata = predict2007to2013_WR)
CheckList_train_WR_5 = tibble("WR_Pred" = pred,
"WR_TP" = ifelse(pred == 1 &predict2007to2013_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(pred == 1 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(pred == 0 &predict2007to2013_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(pred == 0 &predict2007to2013_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[5,"WR_TP"] = sum(CheckList_train_WR_5$WR_TP)
LogisticRegressionPerfMeas[5,"WR_TN"] = sum(CheckList_train_WR_5$WR_TN)
LogisticRegressionPerfMeas[5,"WR_FP"] = sum(CheckList_train_WR_5$WR_FP)
LogisticRegressionPerfMeas[5,"WR_FN"] = sum(CheckList_train_WR_5$WR_FN)
# testing error
pred = predict(model_logit_WR, newdata = predict2014_WR)
CheckList_test_WR_5 = tibble("WR_Pred" = pred,
"WR_TP" = ifelse(pred == 1 & predict2014_WR$Drafted == 1, 1, 0),
"WR_FP" = ifelse(pred == 1 & predict2014_WR$Drafted == 0, 1, 0),
"WR_TN" = ifelse(pred == 0 & predict2014_WR$Drafted == 0, 1, 0),
"WR_FN" = ifelse(pred == 0 & predict2014_WR$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[5,"WR_TP"] = sum(CheckList_test_WR_5$WR_TP)
LogisticRegressionPerfMeas2014[5,"WR_TN"] = sum(CheckList_test_WR_5$WR_TN)
LogisticRegressionPerfMeas2014[5,"WR_FP"] = sum(CheckList_test_WR_5$WR_FP)
LogisticRegressionPerfMeas2014[5,"WR_FN"] = sum(CheckList_test_WR_5$WR_FN)
### Logistic Regression for RBs --------
## training and testing data
# We use years 2007 to 2013 for training and the year 2014 for testing.
Data_RB_train = SmoteData %>%
filter(Year != 2014) %>%
filter(Position == "RB") %>%
select(-Position, -Name, -Player.Code, -Year)
## train the model on the training data, including a 10-fold cross-validation
model_logit_RB <- train(Drafted ~ .,
data = Data_RB_train,
trControl = trainControl(method = "cv", number = 10),
method = "glm",
family=binomial())
## Performance Measurement
# training error
pred = predict(model_logit_RB, newdata = predict2007to2013_RB)
CheckList_train_RB_5 = tibble("RB_Pred" = pred,
"RB_TP" = ifelse(pred == 1 &predict2007to2013_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(pred == 1 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(pred == 0 &predict2007to2013_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(pred == 0 &predict2007to2013_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas[5,"RB_TP"] = sum(CheckList_train_RB_5$RB_TP)
LogisticRegressionPerfMeas[5,"RB_TN"] = sum(CheckList_train_RB_5$RB_TN)
LogisticRegressionPerfMeas[5,"RB_FP"] = sum(CheckList_train_RB_5$RB_FP)
LogisticRegressionPerfMeas[5,"RB_FN"] = sum(CheckList_train_RB_5$RB_FN)
# testing error
pred = predict(model_logit_RB, newdata = predict2014_RB)
CheckList_test_RB_5 = tibble("RB_Pred" = pred,
"RB_TP" = ifelse(pred == 1 & predict2014_RB$Drafted == 1, 1, 0),
"RB_FP" = ifelse(pred == 1 & predict2014_RB$Drafted == 0, 1, 0),
"RB_TN" = ifelse(pred == 0 & predict2014_RB$Drafted == 0, 1, 0),
"RB_FN" = ifelse(pred == 0 & predict2014_RB$Drafted == 1, 1, 0))
# Fill the Performance Measurement Matrix
LogisticRegressionPerfMeas2014[5,"RB_TP"] = sum(CheckList_test_RB_5$RB_TP)
LogisticRegressionPerfMeas2014[5,"RB_TN"] = sum(CheckList_test_RB_5$RB_TN)
LogisticRegressionPerfMeas2014[5,"RB_FP"] = sum(CheckList_test_RB_5$RB_FP)
LogisticRegressionPerfMeas2014[5,"RB_FN"] = sum(CheckList_test_RB_5$RB_FN)
### Performance Measurement ----------
# Save Performance Measurement data frame
save(LogisticRegressionPerfMeas, file = "../Data/PerformanceMeasurement/LogisticRegressionPerfMeas.Rdata")
save(LogisticRegressionPerfMeas2014, file = "../Data/PerformanceMeasurement/LogisticRegressionPerfMeas2014.Rdata")
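# Illustrative only (not part of the original pipeline): a minimal sketch of how accuracy,
# precision and recall could be derived from the confusion-matrix counts stored above, shown
# for the "Together" models on the 2014 test data. The metric names (e.g. Together_Accuracy)
# are introduced here purely for illustration.
LogitMetrics2014_sketch = LogisticRegressionPerfMeas2014 %>%
  mutate(Together_Accuracy  = (Together_TP + Together_TN) /
           (Together_TP + Together_TN + Together_FP + Together_FN),
         Together_Precision = Together_TP / (Together_TP + Together_FP),
         Together_Recall    = Together_TP / (Together_TP + Together_FN))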
<file_sep>/Data/READMEs/RM_NaiveBayes.Rmd
---
title: "README for Naรฏve Bayes"
author: "Group 2"
date: "02 12 2019"
output:
pdf_document: default
html_document: default
word_document: default
---
**Corresponding R Script:** [NaïveBayes](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts)
# 1. Introduction
There are two classes of college football players (CFPs), $C_{k \in \{1,2\}} = \{D, \bar{D}\}$: drafted and not drafted to the NFL. A CFP-profile consists of different features $x_{1},...,x_{n}$. To estimate whether a new CFP will be drafted to the NFL, the following question has to be answered for the features of the new CFP-profile: Is $P(D\mid X)$ - the probability that the CFP will be drafted to the NFL given the features $x_{1},...,x_{n}$ - larger or smaller than $P(\bar{D}\mid X)$ - the probability that the CFP will not be drafted given the same features? It follows that if $P(D\mid X) < P(\bar{D}\mid X)$, the new CFP-profile with the features $x_{1},...,x_{n}$ will be labeled as not drafted, and vice versa.
# 2. Bayes Rule
To compute the probabilities $P(D\mid X)$ and $P(\bar{D} \mid X)$ Bayes Rule is applied. The conditional probability $P(C_{k}\mid X)$ is defined by:
$$P(C_{k}\mid X) = \frac{ P(C_{k}\cap X)}{P(X)}$$
By rewriting the conditional probabilities and equating the right-hand sides of these equations, it follows that:
$$P(C_{k}\cap X)= P(C_{k}\mid X)P(X)$$
$$P(C_{k}\cap X)= P(X\mid C_{k})P(C_{k})$$
$$P(C_{k}\mid X) = \frac{P(X\mid C_{k})P(C_{k})}{P(X)}$$
Where $P(C_{k}\mid X)$ is the posterior probability, $P(X \mid C_{k})$ the likelihood, $P(C_{k})$ the prior probability of the class and $P(X)$ the prior probability of the features. In practice, we are only interested in the numerator of that fraction, because the denominator does not depend on $C_{k}$ and the values of the features $x_{1},...,x_{n}$ are given, so that the denominator is constant. The numerator is equivalent to the joint probability model $P(C_{k}, x_{1},...,x_{n})$.
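To make the rule concrete, consider a purely illustrative example with made-up numbers (they are not estimates from our data): suppose $P(D) = 0.1$, $P(X\mid D) = 0.5$ and $P(X\mid \bar{D}) = 0.05$. Then
$$P(D\mid X) = \frac{P(X\mid D)P(D)}{P(X\mid D)P(D) + P(X\mid \bar{D})P(\bar{D})} = \frac{0.5 \cdot 0.1}{0.5 \cdot 0.1 + 0.05 \cdot 0.9} \approx 0.526,$$
so this hypothetical CFP would be labeled as drafted, since $P(D\mid X) > P(\bar{D}\mid X)$.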
# 3. Naïve Bayes
The following section explains how the numerator, the denominator and finally the conditional distribution over the class variable $C_{k}$ are calculated in the sense of Naïve Bayes.
## 3.1 Compute the Numerator $P(X\mid C_{k})P(C_{k})$: Naïve Assumption
Using the chain rule, i.e. repeated applications of the definition of conditional probability, the numerator $P(X\mid C_{k})P(C_{k})$, respectively $P(C_{k}, x_{1},...,x_{n})$, can be decomposed as:
$$P(C_{k}, x_{1},...,x_{n}) = P(x_{1},...,x_{n}, C_{k}) = P(x_{1}\mid x_{2},...,x_{n},C_{k})P(x_{2},...,x_{n}, C_{k})$$
$$ = P(x_{1}\mid x_{2},...,x_{n},C_{k})P(x_{2}\mid x_{3},...,x_{n},C_{k})P(x_{3},...,x_{n}, C_{k})=...$$
$$=P(x_{1}\mid x_{2},...,x_{n},C_{k})P(x_{2}\mid x_{3},...,x_{n},C_{k})...P(x_{n-1}\mid x_{n},C_{k}) P(x_{n}\mid C_{k})P(C_{k}) $$
This set of probabilities can be hard and expensive to calculate. But with a conditional independence assumption, this long expression can be reduced to a very simple form. The conditional independence assumption is that given a class $C_{k}$ the feature values $x_{i}$ are independent of each other. There is no correlation between the features for a certain class. This is stated as:
$$P(x_{i}\mid x_{i+1},...,x_{n},C_{k})=P(x_{i}\mid C_{k})$$
Hence $P(X\mid C_{k})P(C_{k})$, respectively the joint model $P(C_{k}, x_{1},...,x_{n})$ can be expressed as:
$$P(C_{k}\mid x_{1},...,x_{n}) \propto P(C_{k}, x_{1},...,x_{n})$$
$$=P(C_{k})P(x_{1}\mid C_{k})P(x_{2}\mid C_{k})P(x_{3}\mid C_{k})... $$
$$= P(C_{k})\prod_{i = 1}^{n} P(x_{i}\mid C_{k}) $$
Where $\propto$ means "proportional to".
### 3.1.2 Gaussian Naïve Bayes
When dealing with continuous variables - as in our context - a typical assumption is that the continuous variables associated with each class are distributed according to a normal (Gaussian) distribution. The probability distribution of $x_{i}$ given class $C_{k}$, $P(x_{i}\mid C_{k})$, is then computed from the normal density:
$$P(x_{i}\mid C_{k})= \frac{1}{\sqrt{2\pi \sigma^2_{k}}}e^{-\frac{(x_{i}-\mu_{k})^2}{2 \sigma^2_{k}}}$$
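As a minimal, self-contained sketch of this computation (all numbers below are invented for illustration and do not come from our data), the class-conditional Gaussian likelihood can be evaluated in R with `dnorm()`, plugging in the class-specific mean and standard deviation that would be estimated from the training data:
```
x_new <- 3000                           # feature value of a new player (invented)
mu_D  <- 3500; sd_D  <- 600             # mean/sd of the feature among drafted players (invented)
mu_ND <- 2000; sd_ND <- 800             # mean/sd of the feature among not drafted players (invented)
dnorm(x_new, mean = mu_D,  sd = sd_D)   # P(x | drafted)
dnorm(x_new, mean = mu_ND, sd = sd_ND)  # P(x | not drafted)
```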
## 3.2 Compute the Denominator $P(X)$
Under the independence assumption, the conditional distribution over the class variable $C_{k}$ is:
$$P(C_{k}\mid x_{1},...,x_{n}) = \frac{1}{Z} P(C_{k})\prod_{i = 1}^{n} P(x_{i}\mid C_{k})$$
where the evidence $Z = P(X) = \sum_{k} P(C_{k}) P(X\mid C_{k})$ is a scaling factor that depends only on
$x_{1},...,x_{n}$ and is therefore a constant, because the feature values are known.
## 3.3 Compute the Posterior Probability $P(C_{k}\mid X)$: Decision Rule
The naive Bayes classifier combines this model with a decision rule. A common rule is to pick the hypothesis that is most probable; this is known as the maximum a posteriori decision rule. The corresponding classifier, a Bayes classifier, is the function that assigns a class label $y=C_{k}$. Since the prior probability of the predictor $P(X)$ is constant given the input, we get:
$$y=\underset{k \in \{1,..., K\}}{\operatorname{argmax}}P(C_{k})\prod_{i = 1}^{n} P(x_{i}\mid C_{k})$$
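A minimal sketch of this decision rule for two classes and independent Gaussian features is shown below (all numbers are invented; a real classifier would estimate the priors, means and standard deviations from the training data). Working with log-probabilities avoids numerical underflow when many small likelihoods are multiplied.
```
x_new <- c(3000, 25)                               # two feature values of a new player (invented)
mu    <- list(D = c(3500, 28), ND = c(2000, 12))   # class-conditional means
sigma <- list(D = c(600, 8),   ND = c(800, 10))    # class-conditional standard deviations
prior <- c(D = 0.1, ND = 0.9)                      # class priors
log_post <- sapply(names(prior), function(k) {
  log(prior[[k]]) + sum(dnorm(x_new, mean = mu[[k]], sd = sigma[[k]], log = TRUE))
})
names(which.max(log_post))                         # predicted class label
```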
# 4. Implementation in R-Studio
The application of Naive Bayes in R Studio is explained below. Where individual code sections are analyzed in more detail, the code for the category `_QB` based on the unsampled dataset is shown as an example. The code shown is also representative of the categories `_tog`, `_RB` and `_WR` as well as of the respective sampled datasets.
## 4.1 Training Naïve Bayes Models with 10-fold Cross Validation
For training we use the corresponding data from the years 2007 to 2013 for both the unsampled and the sampled datasets. We train Naïve Bayes models with 10-fold cross-validation and therefore use the package `caret`, which is generally used for classification and regression training. Using `train()`, we evaluate the accuracy of the Naïve Bayes classifiers by 10-fold cross-validation. Which distribution is used for $P(x_{i}\mid C_{k})$ in the Naïve Bayes models, and whether a normal distribution (as commonly used for continuous variables) seems appropriate in our context, is explained and discussed in chapter 4.2.
```
# Define features (x) and target (y)
features_tog <- setdiff(names(Data2007to2013_tog), "Drafted")
x_tog <- Data2007to2013_tog[,features_tog]
y_tog <- Data2007to2013_tog$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_tog <- train(x_tog,y_tog,method = "nb",trControl=trainControl(method='cv',number=10))
```
A probability cutoff of 0.5 is used by default in the following code. Further, we store the predictions in a checklist; what we later do with the checklist is described in the script [NaïveBayes](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts).
```
predict_tog <- predict(NB_tog,Data2007to2013_tog)
CheckList_tog = cbind.data.frame(Data2007to2013_tog$Drafted,predict_tog)
```
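If a cutoff other than 0.5 were desired, one could - as a sketch only, we do not do this in the analysis - ask `caret` for class probabilities and threshold them manually. The columns of the returned data frame are named after the factor levels of `Drafted` (`"0"` and `"1"`); the cutoff 0.3 below is an arbitrary illustrative value.
```
prob_tog <- predict(NB_tog, Data2007to2013_tog, type = "prob")
predict_tog_cut <- factor(ifelse(prob_tog[, "1"] > 0.3, 1, 0), levels = c(0, 1))
```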
## 4.2 Density Distributions
We are dealing with continuous variables. As already mentioned, a typical assumption is that the continuous variables associated with each class are distributed according to a normal (Gaussian) distribution; the probability distribution of $x_{i}$ given class $C_{k}$, $P(x_{i}\mid C_{k})$, is then computed from the normal distribution. In order to see whether the predictors have discriminative power and to assess whether a normal distribution of the predictors' values seems adequate to compute $P(x_{i}\mid C_{k})$, we plot the density distributions of the variables and generate quantile-quantile plots.
<center> __Density Distribution: Quarterbacks (no sampling)__ </center>

<center> __Quantile-Quantile-Plots: Quarterbacks (no sampling)__ </center>

As can be seen from the density plots, the variables discriminate between drafted and not drafted, although not to the same extent. What also becomes apparent, especially when looking at the quantile-quantile plots, is that a Gaussian distribution for the calculation of $P(x_{i}\mid C_{k})$ does not seem appropriate for every feature. A quantile-quantile plot is a scatterplot created by plotting two sets of quantiles against one another: the sample data are sorted in ascending order and then plotted against quantiles calculated from a theoretical distribution. If the data are normally distributed, the points in the plot lie on a straight diagonal line. In our case, only a few variables are even approximately normally distributed.
In order to maximize model accuracy, the `train()` function takes this issue into account: based on cross-validated accuracy, it selects kernel density estimation (`usekernel = TRUE`) over the Gaussian densities:
```
> NB_QB
(...)
Accuracy was used to select the optimal model using the largest value.
The final values used for the model were fL = 0, usekernel = TRUE and adjust = 1.
```
Kernel density estimation is a non-parametric way to estimate the probability density function of a random variable. It is a fundamental data smoothing problem where conclusions about the population are made based on a finite data sample. For further theoretical background, please refer to the corresponding literature.
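A minimal, self-contained sketch (with simulated data, not our dataset) illustrates why a kernel density estimate can represent a skewed feature better than a single Gaussian fit:
```
set.seed(1)
x <- rexp(200, rate = 1/500)                                  # skewed toy feature
hist(x, freq = FALSE, main = "Gaussian fit vs. kernel density estimate")
m <- mean(x); s <- sd(x)
curve(dnorm(x, mean = m, sd = s), add = TRUE, col = "red")    # parametric (Gaussian) fit
lines(density(x), col = "blue")                               # kernel density estimate
```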
## 4.3 Naïve Independence Assumption
As already mentioned, the Naïve Bayes classifier makes a simplifying assumption to allow the computation to scale. With Naïve Bayes, we assume that the predictor variables are conditionally independent of one another given the response value. In other words, there are no interactions or correlations among the features that could contain information relevant for the classification. This is an extremely strong assumption. In our context, we can quickly see that our data violates it, as we have several moderately to strongly correlated variables. To reveal potential correlations among the individual variables, a correlation plot is shown as an example for not-drafted quarterbacks. Similar and even more extreme patterns can be found for the groups `_tog`, `_RB` and `_WR` with respect to the unsampled and sampled data; see [Naïve.Bayes](https://github.com/NicSchuler/DSF_NFLDraftPrediction/tree/master/Project_Scripts).
<center> __Correlation Plot for Not Drafted Quarterbacks (no sampling)__ </center>

Basically, correlation between the features violates the naïve assumption. Despite this, Naïve Bayes has been shown to be robust against violations of this assumption. This is also the case in our analysis, as is evident from the chapter in the documentation summarizing the results - see [Documentation](https://github.com/NicSchuler/DSF_NFLDraftPrediction). Despite the violation of the naïve assumption, the Naive Bayes classifiers work quite well. For a robust classification, the exact probabilities $P(C_{k}\mid X)$ - in our case $P(D\mid X)$ and $P(\bar{D}\mid X)$ - that would take correlations into account are not required; it must only be ensured that one can correctly say which of the two probabilities is the larger one.
<file_sep>/Project_Scripts/NaiveBayes.R
rm(list=ls())
graphics.off()
# IMPORTANT NOTE: if already loaded, the packages MUST be detached and then loaded in this order, because klaR loads MASS,
# whose select() would otherwise clash with dplyr::select()
# detach("package:klaR", unload = TRUE)
# detach("package:tidyverse", unload = TRUE)
library(klaR)
library(tidyverse)
library(caret) # Classification and Regression Training
library(ggplot2) # Data Visualization
library(corrplot) # Visualization of Correlation
library(reshape2) # Flexibily Reshape Data
# Performance Measurement for training data (2005 to 2013)
NaiveBayesPerfMeas = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
NaiveBayesPerfMeas[1,2] = "no_sampling"
NaiveBayesPerfMeas[2,2] = "oversampling"
NaiveBayesPerfMeas[3,2] = "undersampling"
NaiveBayesPerfMeas[4,2] = "Rose_both"
NaiveBayesPerfMeas[5,2] = "Smote"
NaiveBayesPerfMeas$Method = "NaiveBayes"
# Performance Measurement for testing data (2014)
NaiveBayesPerfMeasTest = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
NaiveBayesPerfMeasTest[1,2] = "no_sampling"
NaiveBayesPerfMeasTest[2,2] = "oversampling"
NaiveBayesPerfMeasTest[3,2] = "undersampling"
NaiveBayesPerfMeasTest[4,2] = "Rose_both"
NaiveBayesPerfMeasTest[5,2] = "Smote"
NaiveBayesPerfMeasTest$Method = "NaiveBayes"
###################################################
# NOTICE
###################################################
# We will do the next steps 5 times (e.g. "1. No Sampling" does the same thing as "2. Oversampling"), but using different data for training the model.
# In other words, this is the cross-validation of the sampling methods. The reason for repeating the code instead of looping over it or wrapping it in a function
# is that the intermediate objects remain easily available in case of further processing.
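# Purely illustrative sketch (not used below): the repeated TP/TN/FP/FN bookkeeping could be
# wrapped in a small helper like this one. We keep the explicit copies in the script so that
# all intermediate objects remain directly available.
confusion_counts <- function(truth, pred) {
  c(TP = sum(pred == 1 & truth == 1),
    TN = sum(pred == 0 & truth == 0),
    FP = sum(pred == 1 & truth == 0),
    FN = sum(pred == 0 & truth == 1))
}
# Example (objects created further below): confusion_counts(Data2007to2013_tog$Drafted, predict_tog)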
# 1. No Sampling ###################################################
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
# I. Naive Bayes Classifier - 07 to 13, together ----------
#1 - Preparations and data visualization ----------
# Training data
CleanClass2007to2013_3<- CleanClass2007to2014_3[CleanClass2007to2014_3$Year != 2014,]
CleanClass2007to2013_3$Drafted <- as.factor(CleanClass2007to2013_3$Drafted)
Data2007to2013_tog <- CleanClass2007to2013_3 %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance
# Testing data
CleanClass2014_3<- CleanClass2007to2014_3[CleanClass2007to2014_3$Year == 2014,]
CleanClass2014_3$Drafted <- as.factor(CleanClass2014_3$Drafted)
CleanClass2014_3_tog <- CleanClass2014_3 %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance
# Density distributions
# (These observations are discussed in the ReadMe)
Data2007to2013_vis_tog <- Data2007to2013_tog
Df1_tog <- Data2007to2013_vis_tog[,c(1, 2:10)]
Long1_tog = melt(Df1_tog, id.vars= "Drafted")
ggplot(data = Long1_tog, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long1_tog, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df2_tog <- Data2007to2013_vis_tog[,c(1, 11:19)]
Long2_tog = melt(Df2_tog, id.vars= "Drafted")
ggplot(data = Long2_tog, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long2_tog, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df3_tog <- Data2007to2013_vis_tog[,c(1, 20:24)]
Long3_tog = melt(Df3_tog, id.vars= "Drafted")
ggplot(data = Long3_tog, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long3_tog, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
# Correlation within variables
# The naive Bayes classifier makes a simplifying assumption (hence the name) to allow the computation to scale.
# With naรฏve Bayes, we assume that the predictor variables are conditionally independent of one another given
# the response value. This is an extremely strong assumption. In our context we can see quickly that our data
# violates this as we have several moderately to strongly correlated variables. See the following correlation plots:
# (These observations are discussed in the ReadMe)
Data2007to2013_vis_tog %>%
filter(Drafted =="1") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
Data2007to2013_vis_tog %>%
filter(Drafted =="0") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_tog <- setdiff(names(Data2007to2013_tog), "Drafted")
x_tog <- Data2007to2013_tog[,features_tog]
y_tog <- Data2007to2013_tog$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_tog <- train(x_tog,y_tog,method = "nb",trControl=trainControl(method='cv',number=10))
# For training data
# Predictions: 0.5 is used for probability cutoff value by default
predict_tog <- predict(NB_tog,Data2007to2013_tog)
confusionMatrix(predict_tog,Data2007to2013_tog$Drafted)
# Comment on the warnings we get when running train() and predict() (noted here once for the whole script):
# "Numerical 0 probability for all classes with observation...": This is not an error or an indication that the code is
# 'wrong'; it is just information that some of the observations produce unusual probabilities. These observations
# are probably outliers.
CheckList_tog = cbind.data.frame(Data2007to2013_tog$Drafted,predict_tog)
names(CheckList_tog)[names(CheckList_tog)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_tog)[names(CheckList_tog)=="predict_tog"] <- "Pred"
CheckList_tog = CheckList_tog %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[1,"Together_TP"] = sum(CheckList_tog$TP)
NaiveBayesPerfMeas[1,"Together_TN"] = sum(CheckList_tog$TN)
NaiveBayesPerfMeas[1,"Together_FP"] = sum(CheckList_tog$FP)
NaiveBayesPerfMeas[1,"Together_FN"] = sum(CheckList_tog$FN)
# For testing data
predict_togTest <- predict(NB_tog,CleanClass2014_3_tog)
CheckList_togTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togTest)
names(CheckList_togTest)[names(CheckList_togTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togTest)[names(CheckList_togTest)=="predict_togTest"] <- "Pred"
CheckList_togTest = CheckList_togTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[1,"Together_TP"] = sum(CheckList_togTest$TP)
NaiveBayesPerfMeasTest[1,"Together_TN"] = sum(CheckList_togTest$TN)
NaiveBayesPerfMeasTest[1,"Together_FP"] = sum(CheckList_togTest$FP)
NaiveBayesPerfMeasTest[1,"Together_FN"] = sum(CheckList_togTest$FN)
# II. Naive Bayes Classifier - 07 to 13, QB ----------
#1 - Preparations and data visualization ----------
# Training data
Data2007to2013_QB <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="QB", ]
Data2007to2013_QB <- Data2007to2013_QB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance
# Testing data
CleanClass2014_3_QB<- CleanClass2014_3[CleanClass2014_3$Position=="QB", ]
CleanClass2014_3_QB <- CleanClass2014_3_QB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety,-Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance
# Density distributions
Data2007to2013_vis_QB <- Data2007to2013_QB
Df1_QB <- Data2007to2013_vis_QB[,c(1, 2:10)]
Long1_QB = melt(Df1_QB, id.vars= "Drafted")
ggplot(data = Long1_QB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long1_QB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df2_QB <- Data2007to2013_vis_QB[,c(1, 11:19)]
Long2_QB = melt(Df2_QB, id.vars= "Drafted")
ggplot(data = Long2_QB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long2_QB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df3_QB <- Data2007to2013_vis_QB[,c(1, 20:22)]
Long3_QB = melt(Df3_QB, id.vars= "Drafted")
ggplot(data = Long3_QB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long3_QB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
# Correlation within variables
Data2007to2013_vis_QB %>%
filter(Drafted =="1") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
Data2007to2013_vis_QB %>%
filter(Drafted =="0") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_QB <- setdiff(names(Data2007to2013_QB), "Drafted")
x_QB <- Data2007to2013_QB[,features_QB]
y_QB <- Data2007to2013_QB$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_QB <- train(x_QB,y_QB,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions
predict_QB <- predict(NB_QB, newdata=Data2007to2013_QB)
confusionMatrix(predict_QB, Data2007to2013_QB$Drafted)
CheckList_QB = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QB)
names(CheckList_QB)[names(CheckList_QB)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QB)[names(CheckList_QB)=="predict_QB"] <- "Pred"
CheckList_QB = CheckList_QB %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[1,"QB_TP"] = sum(CheckList_QB$TP)
NaiveBayesPerfMeas[1,"QB_TN"] = sum(CheckList_QB$TN)
NaiveBayesPerfMeas[1,"QB_FP"] = sum(CheckList_QB$FP)
NaiveBayesPerfMeas[1,"QB_FN"] = sum(CheckList_QB$FN)
# For testing data
predict_QBTest <- predict(NB_QB,CleanClass2014_3_QB)
CheckList_QBTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBTest)
names(CheckList_QBTest)[names(CheckList_QBTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBTest)[names(CheckList_QBTest)=="predict_QBTest"] <- "Pred"
CheckList_QBTest = CheckList_QBTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[1,"QB_TP"] = sum(CheckList_QBTest$TP)
NaiveBayesPerfMeasTest[1,"QB_TN"] = sum(CheckList_QBTest$TN)
NaiveBayesPerfMeasTest[1,"QB_FP"] = sum(CheckList_QBTest$FP)
NaiveBayesPerfMeasTest[1,"QB_FN"] = sum(CheckList_QBTest$FN)
# III. Naive Bayes Classifier - 07 to 13, WR ----------
#1 - Preparations and data visualization ----------
# Training data
Data2007to2013_WR <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="WR", ]
Data2007to2013_WR <- Data2007to2013_WR %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_WR<- CleanClass2014_3[CleanClass2014_3$Position=="WR", ]
CleanClass2014_3_WR <- CleanClass2014_3_WR %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Density distributions
Data2007to2013_vis_WR <- Data2007to2013_WR
Df1_WR <- Data2007to2013_vis_WR[,c(1, 2:10)]
Long1_WR = melt(Df1_WR, id.vars= "Drafted")
ggplot(data = Long1_WR, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long1_WR, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df2_WR <- Data2007to2013_vis_WR[,c(1, 11:19)]
Long2_WR = melt(Df2_WR, id.vars= "Drafted")
ggplot(data = Long2_WR, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long2_WR, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df3_WR <- Data2007to2013_vis_WR[,c(1, 20:24)]
Long3_WR = melt(Df3_WR, id.vars= "Drafted")
ggplot(data = Long3_WR, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long3_WR, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
# Correlation within variables
Data2007to2013_vis_WR %>%
filter(Drafted =="1") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
Data2007to2013_vis_WR %>%
filter(Drafted =="0") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_WR <- setdiff(names(Data2007to2013_WR), "Drafted")
x_WR <- Data2007to2013_WR[,features_WR]
y_WR <- Data2007to2013_WR$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_WR <- train(x_WR,y_WR,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_WR <- predict(NB_WR, newdata = Data2007to2013_WR)
confusionMatrix(predict_WR, Data2007to2013_WR$Drafted)
CheckList_WR = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WR)
names(CheckList_WR)[names(CheckList_WR)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WR)[names(CheckList_WR)=="predict_WR"] <- "Pred"
CheckList_WR = CheckList_WR %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[1,"WR_TP"] = sum(CheckList_WR$TP)
NaiveBayesPerfMeas[1,"WR_TN"] = sum(CheckList_WR$TN)
NaiveBayesPerfMeas[1,"WR_FP"] = sum(CheckList_WR$FP)
NaiveBayesPerfMeas[1,"WR_FN"] = sum(CheckList_WR$FN)
# For testing data
predict_WRTest <- predict(NB_WR,CleanClass2014_3_WR)
CheckList_WRTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRTest)
names(CheckList_WRTest)[names(CheckList_WRTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRTest)[names(CheckList_WRTest)=="predict_WRTest"] <- "Pred"
CheckList_WRTest = CheckList_WRTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[1,"WR_TP"] = sum(CheckList_WRTest$TP)
NaiveBayesPerfMeasTest[1,"WR_TN"] = sum(CheckList_WRTest$TN)
NaiveBayesPerfMeasTest[1,"WR_FP"] = sum(CheckList_WRTest$FP)
NaiveBayesPerfMeasTest[1,"WR_FN"] = sum(CheckList_WRTest$FN)
# IV. Naive Bayes Classifier - 07 to 13, RB ----------
#1 - Preparations and data visualization ----------
# Training data
Data2007to2013_RB <- CleanClass2007to2013_3[CleanClass2007to2013_3$Position=="RB", ]
Data2007to2013_RB <- Data2007to2013_RB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_RB <- CleanClass2014_3[CleanClass2014_3$Position=="RB", ]
CleanClass2014_3_RB <- CleanClass2014_3_RB %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Density distributions
Data2007to2013_vis_RB <- Data2007to2013_RB
Df1_RB <- Data2007to2013_vis_RB[,c(1, 2:10)]
Long1_RB = melt(Df1_RB, id.vars= "Drafted")
ggplot(data = Long1_RB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long1_RB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df2_RB <- Data2007to2013_vis_RB[,c(1, 11:19)]
Long2_RB = melt(Df2_RB, id.vars= "Drafted")
ggplot(data = Long2_RB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long2_RB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
Df3_RB <- Data2007to2013_vis_RB[,c(1, 20:24)]
Long3_RB = melt(Df3_RB, id.vars= "Drafted")
ggplot(data = Long3_RB, aes(x = value, fill=Drafted)) +
geom_density(alpha=0.6) +
facet_wrap(~variable, scales = "free")
ggplot(data = Long3_RB, aes(sample = value, color=Drafted)) +
geom_qq(alpha=0.6) +
geom_qq_line()+
facet_wrap(~variable, scales = "free")
# Correlation within variables
Data2007to2013_vis_RB %>%
filter(Drafted =="1") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
Data2007to2013_vis_RB %>%
filter(Drafted =="0") %>%
select_if(is.numeric) %>%
cor() %>%
corrplot::corrplot()
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_RB <- setdiff(names(Data2007to2013_RB), "Drafted")
x_RB <- Data2007to2013_RB[,features_RB]
y_RB <- Data2007to2013_RB$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_RB <- train(x_RB,y_RB,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_RB <- predict(NB_RB, newdata = Data2007to2013_RB)
confusionMatrix(predict_RB, Data2007to2013_RB$Drafted)
CheckList_RB = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RB)
names(CheckList_RB)[names(CheckList_RB)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RB)[names(CheckList_RB)=="predict_RB"] <- "Pred"
CheckList_RB = CheckList_RB %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[1,"RB_TP"] = sum(CheckList_RB$TP)
NaiveBayesPerfMeas[1,"RB_TN"] = sum(CheckList_RB$TN)
NaiveBayesPerfMeas[1,"RB_FP"] = sum(CheckList_RB$FP)
NaiveBayesPerfMeas[1,"RB_FN"] = sum(CheckList_RB$FN)
# For testing data
predict_RBTest <- predict(NB_RB,CleanClass2014_3_RB)
CheckList_RBTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBTest)
names(CheckList_RBTest)[names(CheckList_RBTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBTest)[names(CheckList_RBTest)=="predict_RBTest"] <- "Pred"
CheckList_RBTest = CheckList_RBTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[1,"RB_TP"] = sum(CheckList_RBTest$TP)
NaiveBayesPerfMeasTest[1,"RB_TN"] = sum(CheckList_RBTest$TN)
NaiveBayesPerfMeasTest[1,"RB_FP"] = sum(CheckList_RBTest$FP)
NaiveBayesPerfMeasTest[1,"RB_FN"] = sum(CheckList_RBTest$FN)
# 2. Oversampling ###################################################
# NOTICE: Data visualization will no longer be performed. If you are interested, the above code can be used analogously.
load("../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
# I. Naive Bayes Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_oversampling$Drafted <- as.factor(CleanClass2007to2014_3_oversampling$Drafted)
Data2007to2013_togOS <- CleanClass2007to2014_3_oversampling %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance.
# Testing data
CleanClass2014_3_tog
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_togOS <- setdiff(names(Data2007to2013_togOS), "Drafted")
x_togOS <- Data2007to2013_togOS[,features_togOS]
y_togOS <- Data2007to2013_togOS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_togOS <- train(x_togOS,y_togOS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_togOS <- predict(NB_togOS, newdata = Data2007to2013_tog)
confusionMatrix(predict_togOS, Data2007to2013_tog$Drafted)
CheckList_togOS = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togOS)
names(CheckList_togOS)[names(CheckList_togOS)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togOS)[names(CheckList_togOS)=="predict_togOS"] <- "Pred"
CheckList_togOS = CheckList_togOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[2,"Together_TP"] = sum(CheckList_togOS$TP)
NaiveBayesPerfMeas[2,"Together_TN"] = sum(CheckList_togOS$TN)
NaiveBayesPerfMeas[2,"Together_FP"] = sum(CheckList_togOS$FP)
NaiveBayesPerfMeas[2,"Together_FN"] = sum(CheckList_togOS$FN)
# For testing data
predict_togOSTest <- predict(NB_togOS,CleanClass2014_3_tog)
CheckList_togOSTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togOSTest)
names(CheckList_togOSTest)[names(CheckList_togOSTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togOSTest)[names(CheckList_togOSTest)=="predict_togOSTest"] <- "Pred"
CheckList_togOSTest = CheckList_togOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[2,"Together_TP"] = sum(CheckList_togOSTest$TP)
NaiveBayesPerfMeasTest[2,"Together_TN"] = sum(CheckList_togOSTest$TN)
NaiveBayesPerfMeasTest[2,"Together_FP"] = sum(CheckList_togOSTest$FP)
NaiveBayesPerfMeasTest[2,"Together_FN"] = sum(CheckList_togOSTest$FN)
# II. Naive Bayes Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBOS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="QB", ]
Data2007to2013_QBOS <- Data2007to2013_QBOS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance
# Testing data
CleanClass2014_3_QB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_QBOS <- setdiff(names(Data2007to2013_QBOS), "Drafted")
x_QBOS <- Data2007to2013_QBOS[,features_QBOS]
y_QBOS <- Data2007to2013_QBOS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_QBOS <- train(x_QBOS,y_QBOS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_QBOS <- predict(NB_QBOS, newdata = Data2007to2013_QB)
confusionMatrix(predict_QBOS, Data2007to2013_QB$Drafted)
CheckList_QBOS = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBOS)
names(CheckList_QBOS)[names(CheckList_QBOS)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBOS)[names(CheckList_QBOS)=="predict_QBOS"] <- "Pred"
CheckList_QBOS = CheckList_QBOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[2,"QB_TP"] = sum(CheckList_QBOS$TP)
NaiveBayesPerfMeas[2,"QB_TN"] = sum(CheckList_QBOS$TN)
NaiveBayesPerfMeas[2,"QB_FP"] = sum(CheckList_QBOS$FP)
NaiveBayesPerfMeas[2,"QB_FN"] = sum(CheckList_QBOS$FN)
# For testing data
predict_QBOSTest <- predict(NB_QBOS,CleanClass2014_3_QB)
CheckList_QBOSTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBOSTest)
names(CheckList_QBOSTest)[names(CheckList_QBOSTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBOSTest)[names(CheckList_QBOSTest)=="predict_QBOSTest"] <- "Pred"
CheckList_QBOSTest = CheckList_QBOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[2,"QB_TP"] = sum(CheckList_QBOSTest$TP)
NaiveBayesPerfMeasTest[2,"QB_TN"] = sum(CheckList_QBOSTest$TN)
NaiveBayesPerfMeasTest[2,"QB_FP"] = sum(CheckList_QBOSTest$FP)
NaiveBayesPerfMeasTest[2,"QB_FN"] = sum(CheckList_QBOSTest$FN)
# III. Naive Bayes Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WROS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="WR", ]
Data2007to2013_WROS <- Data2007to2013_WROS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance.
# Testing data
CleanClass2014_3_WR
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_WROS <- setdiff(names(Data2007to2013_WROS), "Drafted")
x_WROS <- Data2007to2013_WROS[,features_WROS]
y_WROS <- Data2007to2013_WROS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_WROS <- train(x_WROS,y_WROS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_WROS <- predict(NB_WROS, newdata = Data2007to2013_WR)
confusionMatrix(predict_WROS, Data2007to2013_WR$Drafted)
CheckList_WROS = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WROS)
names(CheckList_WROS)[names(CheckList_WROS)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WROS)[names(CheckList_WROS)=="predict_WROS"] <- "Pred"
CheckList_WROS = CheckList_WROS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[2,"WR_TP"] = sum(CheckList_WROS$TP)
NaiveBayesPerfMeas[2,"WR_TN"] = sum(CheckList_WROS$TN)
NaiveBayesPerfMeas[2,"WR_FP"] = sum(CheckList_WROS$FP)
NaiveBayesPerfMeas[2,"WR_FN"] = sum(CheckList_WROS$FN)
# For testing data
predict_WROSTest <- predict(NB_WROS,CleanClass2014_3_WR)
CheckList_WROSTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WROSTest)
names(CheckList_WROSTest)[names(CheckList_WROSTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WROSTest)[names(CheckList_WROSTest)=="predict_WROSTest"] <- "Pred"
CheckList_WROSTest = CheckList_WROSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[2,"WR_TP"] = sum(CheckList_WROSTest$TP)
NaiveBayesPerfMeasTest[2,"WR_TN"] = sum(CheckList_WROSTest$TN)
NaiveBayesPerfMeasTest[2,"WR_FP"] = sum(CheckList_WROSTest$FP)
NaiveBayesPerfMeasTest[2,"WR_FN"] = sum(CheckList_WROSTest$FN)
# IV. Naive Bayes Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBOS <- CleanClass2007to2014_3_oversampling[CleanClass2007to2014_3_oversampling$Position=="RB", ]
Data2007to2013_RBOS <- Data2007to2013_RBOS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance.
# Testing data
CleanClass2014_3_RB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_RBOS <- setdiff(names(Data2007to2013_RBOS), "Drafted")
x_RBOS <- Data2007to2013_RBOS[,features_RBOS]
y_RBOS <- Data2007to2013_RBOS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_RBOS <- train(x_RBOS,y_RBOS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_RBOS <- predict(NB_RBOS, newdata = Data2007to2013_RB)
confusionMatrix(predict_RBOS, Data2007to2013_RB$Drafted)
CheckList_RBOS = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBOS)
names(CheckList_RBOS)[names(CheckList_RBOS)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBOS)[names(CheckList_RBOS)=="predict_RBOS"] <- "Pred"
CheckList_RBOS = CheckList_RBOS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[2,"RB_TP"] = sum(CheckList_RBOS$TP)
NaiveBayesPerfMeas[2,"RB_TN"] = sum(CheckList_RBOS$TN)
NaiveBayesPerfMeas[2,"RB_FP"] = sum(CheckList_RBOS$FP)
NaiveBayesPerfMeas[2,"RB_FN"] = sum(CheckList_RBOS$FN)
# For testing data
predict_RBOSTest <- predict(NB_RBOS,CleanClass2014_3_RB)
CheckList_RBOSTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBOSTest)
names(CheckList_RBOSTest)[names(CheckList_RBOSTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBOSTest)[names(CheckList_RBOSTest)=="predict_RBOSTest"] <- "Pred"
CheckList_RBOSTest = CheckList_RBOSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[2,"RB_TP"] = sum(CheckList_RBOSTest$TP)
NaiveBayesPerfMeasTest[2,"RB_TN"] = sum(CheckList_RBOSTest$TN)
NaiveBayesPerfMeasTest[2,"RB_FP"] = sum(CheckList_RBOSTest$FP)
NaiveBayesPerfMeasTest[2,"RB_FN"] = sum(CheckList_RBOSTest$FN)
# 3. Undersampling ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
# I. Naive Bayes Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_undersampling$Drafted <- as.factor(CleanClass2007to2014_3_undersampling$Drafted)
Data2007to2013_togUS <- CleanClass2007to2014_3_undersampling %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance.
# Testing data
CleanClass2014_3_tog
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_togUS <- setdiff(names(Data2007to2013_togUS), "Drafted")
x_togUS <- Data2007to2013_togUS[,features_togUS]
y_togUS <- Data2007to2013_togUS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_togUS <- train(x_togUS,y_togUS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_togUS <- predict(NB_togUS, newdata = Data2007to2013_tog)
confusionMatrix(predict_togUS, Data2007to2013_tog$Drafted)
CheckList_togUS = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togUS)
names(CheckList_togUS)[names(CheckList_togUS)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togUS)[names(CheckList_togUS)=="predict_togUS"] <- "Pred"
CheckList_togUS = CheckList_togUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[3,"Together_TP"] = sum(CheckList_togUS$TP)
NaiveBayesPerfMeas[3,"Together_TN"] = sum(CheckList_togUS$TN)
NaiveBayesPerfMeas[3,"Together_FP"] = sum(CheckList_togUS$FP)
NaiveBayesPerfMeas[3,"Together_FN"] = sum(CheckList_togUS$FN)
# For testing data
predict_togUSTest <- predict(NB_togUS,CleanClass2014_3_tog)
CheckList_togUSTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togUSTest)
names(CheckList_togUSTest)[names(CheckList_togUSTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togUSTest)[names(CheckList_togUSTest)=="predict_togUSTest"] <- "Pred"
CheckList_togUSTest = CheckList_togUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[3,"Together_TP"] = sum(CheckList_togUSTest$TP)
NaiveBayesPerfMeasTest[3,"Together_TN"] = sum(CheckList_togUSTest$TN)
NaiveBayesPerfMeasTest[3,"Together_FP"] = sum(CheckList_togUSTest$FP)
NaiveBayesPerfMeasTest[3,"Together_FN"] = sum(CheckList_togUSTest$FN)
# II. Naive Bayes Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="QB", ]
Data2007to2013_QBUS <- Data2007to2013_QBUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance.
# Testing data
CleanClass2014_3_QB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_QBUS <- setdiff(names(Data2007to2013_QBUS), "Drafted")
x_QBUS <- Data2007to2013_QBUS[,features_QBUS]
y_QBUS <- Data2007to2013_QBUS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_QBUS <- train(x_QBUS,y_QBUS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_QBUS <- predict(NB_QBUS, newdata = Data2007to2013_QB)
confusionMatrix(predict_QBUS, Data2007to2013_QB$Drafted)
CheckList_QBUS = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBUS)
names(CheckList_QBUS)[names(CheckList_QBUS)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBUS)[names(CheckList_QBUS)=="predict_QBUS"] <- "Pred"
CheckList_QBUS = CheckList_QBUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[3,"QB_TP"] = sum(CheckList_QBUS$TP)
NaiveBayesPerfMeas[3,"QB_TN"] = sum(CheckList_QBUS$TN)
NaiveBayesPerfMeas[3,"QB_FP"] = sum(CheckList_QBUS$FP)
NaiveBayesPerfMeas[3,"QB_FN"] = sum(CheckList_QBUS$FN)
# For testing data
predict_QBUSTest <- predict(NB_QBUS,CleanClass2014_3_QB)
CheckList_QBUSTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBUSTest)
names(CheckList_QBUSTest)[names(CheckList_QBUSTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBUSTest)[names(CheckList_QBUSTest)=="predict_QBUSTest"] <- "Pred"
CheckList_QBUSTest = CheckList_QBUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[3,"QB_TP"] = sum(CheckList_QBUSTest$TP)
NaiveBayesPerfMeasTest[3,"QB_TN"] = sum(CheckList_QBUSTest$TN)
NaiveBayesPerfMeasTest[3,"QB_FP"] = sum(CheckList_QBUSTest$FP)
NaiveBayesPerfMeasTest[3,"QB_FN"] = sum(CheckList_QBUSTest$FN)
# III. Naive Bayes Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="WR", ]
Data2007to2013_WRUS <- Data2007to2013_WRUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_WR
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_WRUS <- setdiff(names(Data2007to2013_WRUS), "Drafted")
x_WRUS <- Data2007to2013_WRUS[,features_WRUS]
y_WRUS <- Data2007to2013_WRUS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_WRUS <- train(x_WRUS,y_WRUS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_WRUS <- predict(NB_WRUS, newdata = Data2007to2013_WR)
confusionMatrix(predict_WRUS, Data2007to2013_WR$Drafted)
CheckList_WRUS = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRUS)
names(CheckList_WRUS)[names(CheckList_WRUS)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRUS)[names(CheckList_WRUS)=="predict_WRUS"] <- "Pred"
CheckList_WRUS = CheckList_WRUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[3,"WR_TP"] = sum(CheckList_WRUS$TP)
NaiveBayesPerfMeas[3,"WR_TN"] = sum(CheckList_WRUS$TN)
NaiveBayesPerfMeas[3,"WR_FP"] = sum(CheckList_WRUS$FP)
NaiveBayesPerfMeas[3,"WR_FN"] = sum(CheckList_WRUS$FN)
# For testing data
predict_WRUSTest <- predict(NB_WRUS,CleanClass2014_3_WR)
CheckList_WRUSTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRUSTest)
names(CheckList_WRUSTest)[names(CheckList_WRUSTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRUSTest)[names(CheckList_WRUSTest)=="predict_WRUSTest"] <- "Pred"
CheckList_WRUSTest = CheckList_WRUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[3,"WR_TP"] = sum(CheckList_WRUSTest$TP)
NaiveBayesPerfMeasTest[3,"WR_TN"] = sum(CheckList_WRUSTest$TN)
NaiveBayesPerfMeasTest[3,"WR_FP"] = sum(CheckList_WRUSTest$FP)
NaiveBayesPerfMeasTest[3,"WR_FN"] = sum(CheckList_WRUSTest$FN)
# IV. Naive Bayes Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBUS <- CleanClass2007to2014_3_undersampling[CleanClass2007to2014_3_undersampling$Position=="RB", ]
Data2007to2013_RBUS <- Data2007to2013_RBUS %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_RB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_RBUS <- setdiff(names(Data2007to2013_RBUS), "Drafted")
x_RBUS <- Data2007to2013_RBUS[,features_RBUS]
y_RBUS <- Data2007to2013_RBUS$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_RBUS <- train(x_RBUS,y_RBUS,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_RBUS <- predict(NB_RBUS, newdata = Data2007to2013_RB)
confusionMatrix(predict_RBUS, Data2007to2013_RB$Drafted)
CheckList_RBUS = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBUS)
names(CheckList_RBUS)[names(CheckList_RBUS)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBUS)[names(CheckList_RBUS)=="predict_RBUS"] <- "Pred"
CheckList_RBUS = CheckList_RBUS %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[3,"RB_TP"] = sum(CheckList_RBUS$TP)
NaiveBayesPerfMeas[3,"RB_TN"] = sum(CheckList_RBUS$TN)
NaiveBayesPerfMeas[3,"RB_FP"] = sum(CheckList_RBUS$FP)
NaiveBayesPerfMeas[3,"RB_FN"] = sum(CheckList_RBUS$FN)
# For testing data
predict_RBUSTest <- predict(NB_RBUS,CleanClass2014_3_RB)
CheckList_RBUSTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBUSTest)
names(CheckList_RBUSTest)[names(CheckList_RBUSTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBUSTest)[names(CheckList_RBUSTest)=="predict_RBUSTest"] <- "Pred"
CheckList_RBUSTest = CheckList_RBUSTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[3,"RB_TP"] = sum(CheckList_RBUSTest$TP)
NaiveBayesPerfMeasTest[3,"RB_TN"] = sum(CheckList_RBUSTest$TN)
NaiveBayesPerfMeasTest[3,"RB_FP"] = sum(CheckList_RBUSTest$FP)
NaiveBayesPerfMeasTest[3,"RB_FN"] = sum(CheckList_RBUSTest$FN)
# 4. Rose_both ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_Rose.both.Rdata")
# I. Naive Bayes Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
CleanClass2007to2014_3_Rose.both$Drafted <- as.factor(CleanClass2007to2014_3_Rose.both$Drafted)
Data2007to2013_togBO <- CleanClass2007to2014_3_Rose.both %>% select(-Position, -Class, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance
# Testing data
CleanClass2014_3_tog
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_togBO <- setdiff(names(Data2007to2013_togBO), "Drafted")
x_togBO <- Data2007to2013_togBO[,features_togBO]
y_togBO <- Data2007to2013_togBO$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_togBO <- train(x_togBO,y_togBO,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_togBO <- predict(NB_togBO, newdata = Data2007to2013_tog)
confusionMatrix(predict_togBO, Data2007to2013_tog$Drafted)
CheckList_togBO = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togBO)
names(CheckList_togBO)[names(CheckList_togBO)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togBO)[names(CheckList_togBO)=="predict_togBO"] <- "Pred"
CheckList_togBO = CheckList_togBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[4,"Together_TP"] = sum(CheckList_togBO$TP)
NaiveBayesPerfMeas[4,"Together_TN"] = sum(CheckList_togBO$TN)
NaiveBayesPerfMeas[4,"Together_FP"] = sum(CheckList_togBO$FP)
NaiveBayesPerfMeas[4,"Together_FN"] = sum(CheckList_togBO$FN)
# For testing data
predict_togBOTest <- predict(NB_togBO,CleanClass2014_3_tog)
CheckList_togBOTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togBOTest)
names(CheckList_togBOTest)[names(CheckList_togBOTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togBOTest)[names(CheckList_togBOTest)=="predict_togBOTest"] <- "Pred"
CheckList_togBOTest = CheckList_togBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[4,"Together_TP"] = sum(CheckList_togBOTest$TP)
NaiveBayesPerfMeasTest[4,"Together_TN"] = sum(CheckList_togBOTest$TN)
NaiveBayesPerfMeasTest[4,"Together_FP"] = sum(CheckList_togBOTest$FP)
NaiveBayesPerfMeasTest[4,"Together_FN"] = sum(CheckList_togBOTest$FN)
# II. Naive Bayes Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="QB", ]
Data2007to2013_QBBO <- Data2007to2013_QBBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance
# Testing data
CleanClass2014_3_QB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_QBBO <- setdiff(names(Data2007to2013_QBBO), "Drafted")
x_QBBO <- Data2007to2013_QBBO[,features_QBBO]
y_QBBO <- Data2007to2013_QBBO$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_QBBO <- train(x_QBBO,y_QBBO,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_QBBO <- predict(NB_QBBO, newdata = Data2007to2013_QB)
confusionMatrix(predict_QBBO, Data2007to2013_QB$Drafted)
CheckList_QBBO = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBBO)
names(CheckList_QBBO)[names(CheckList_QBBO)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBBO)[names(CheckList_QBBO)=="predict_QBBO"] <- "Pred"
CheckList_QBBO = CheckList_QBBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[4,"QB_TP"] = sum(CheckList_QBBO$TP)
NaiveBayesPerfMeas[4,"QB_TN"] = sum(CheckList_QBBO$TN)
NaiveBayesPerfMeas[4,"QB_FP"] = sum(CheckList_QBBO$FP)
NaiveBayesPerfMeas[4,"QB_FN"] = sum(CheckList_QBBO$FN)
# For testing data
predict_QBBOTest <- predict(NB_QBBO,CleanClass2014_3_QB)
CheckList_QBBOTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBBOTest)
names(CheckList_QBBOTest)[names(CheckList_QBBOTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBBOTest)[names(CheckList_QBBOTest)=="predict_QBBOTest"] <- "Pred"
CheckList_QBBOTest = CheckList_QBBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[4,"QB_TP"] = sum(CheckList_QBBOTest$TP)
NaiveBayesPerfMeasTest[4,"QB_TN"] = sum(CheckList_QBBOTest$TN)
NaiveBayesPerfMeasTest[4,"QB_FP"] = sum(CheckList_QBBOTest$FP)
NaiveBayesPerfMeasTest[4,"QB_FN"] = sum(CheckList_QBBOTest$FN)
# III. Naive Bayes Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="WR", ]
Data2007to2013_WRBO <- Data2007to2013_WRBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_WR
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_WRBO <- setdiff(names(Data2007to2013_WRBO), "Drafted")
x_WRBO <- Data2007to2013_WRBO[,features_WRBO]
y_WRBO <- Data2007to2013_WRBO$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_WRBO <- train(x_WRBO,y_WRBO,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_WRBO <- predict(NB_WRBO, newdata = Data2007to2013_WR)
confusionMatrix(predict_WRBO, Data2007to2013_WR$Drafted)
CheckList_WRBO = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRBO)
names(CheckList_WRBO)[names(CheckList_WRBO)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRBO)[names(CheckList_WRBO)=="predict_WRBO"] <- "Pred"
CheckList_WRBO = CheckList_WRBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[4,"WR_TP"] = sum(CheckList_WRBO$TP)
NaiveBayesPerfMeas[4,"WR_TN"] = sum(CheckList_WRBO$TN)
NaiveBayesPerfMeas[4,"WR_FP"] = sum(CheckList_WRBO$FP)
NaiveBayesPerfMeas[4,"WR_FN"] = sum(CheckList_WRBO$FN)
# For testing data
predict_WRBOTest <- predict(NB_WRBO,CleanClass2014_3_WR)
CheckList_WRBOTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRBOTest)
names(CheckList_WRBOTest)[names(CheckList_WRBOTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRBOTest)[names(CheckList_WRBOTest)=="predict_WRBOTest"] <- "Pred"
CheckList_WRBOTest = CheckList_WRBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[4,"WR_TP"] = sum(CheckList_WRBOTest$TP)
NaiveBayesPerfMeasTest[4,"WR_TN"] = sum(CheckList_WRBOTest$TN)
NaiveBayesPerfMeasTest[4,"WR_FP"] = sum(CheckList_WRBOTest$FP)
NaiveBayesPerfMeasTest[4,"WR_FN"] = sum(CheckList_WRBOTest$FN)
# IV. Naive Bayes Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBBO <- CleanClass2007to2014_3_Rose.both[CleanClass2007to2014_3_Rose.both$Position=="RB", ]
Data2007to2013_RBBO <- Data2007to2013_RBBO %>% select(-Class, -Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance.
# Testing data
CleanClass2014_3_RB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_RBBO <- setdiff(names(Data2007to2013_RBBO), "Drafted")
x_RBBO <- Data2007to2013_RBBO[,features_RBBO]
y_RBBO <- Data2007to2013_RBBO$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_RBBO <- train(x_RBBO,y_RBBO,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_RBBO <- predict(NB_RBBO, newdata = Data2007to2013_RB)
confusionMatrix(predict_RBBO, Data2007to2013_RB$Drafted)
CheckList_RBBO = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBBO)
names(CheckList_RBBO)[names(CheckList_RBBO)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBBO)[names(CheckList_RBBO)=="predict_RBBO"] <- "Pred"
CheckList_RBBO = CheckList_RBBO %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[4,"RB_TP"] = sum(CheckList_RBBO$TP)
NaiveBayesPerfMeas[4,"RB_TN"] = sum(CheckList_RBBO$TN)
NaiveBayesPerfMeas[4,"RB_FP"] = sum(CheckList_RBBO$FP)
NaiveBayesPerfMeas[4,"RB_FN"] = sum(CheckList_RBBO$FN)
# For testing data
predict_RBBOTest <- predict(NB_RBBO,CleanClass2014_3_RB)
CheckList_RBBOTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBBOTest)
names(CheckList_RBBOTest)[names(CheckList_RBBOTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBBOTest)[names(CheckList_RBBOTest)=="predict_RBBOTest"] <- "Pred"
CheckList_RBBOTest = CheckList_RBBOTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[4,"RB_TP"] = sum(CheckList_RBBOTest$TP)
NaiveBayesPerfMeasTest[4,"RB_TN"] = sum(CheckList_RBBOTest$TN)
NaiveBayesPerfMeasTest[4,"RB_FP"] = sum(CheckList_RBBOTest$FP)
NaiveBayesPerfMeasTest[4,"RB_FN"] = sum(CheckList_RBBOTest$FN)
# 5. Smote ###################################################
load("../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
# I. Naive Bayes Classifier - 07 to 13, together ----------
#1 - Preparations ----------
# Training data
cleanData_smote$Drafted <- as.factor(cleanData_smote$Drafted)
Data2007to2013_togSM <- cleanData_smote %>% select(-Position, -Name, -Player.Code, -Year,
-Safety) #this variable has zero variance
# Testing data
CleanClass2014_3_tog
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_togSM <- setdiff(names(Data2007to2013_togSM), "Drafted")
x_togSM <- Data2007to2013_togSM[,features_togSM]
y_togSM <- Data2007to2013_togSM$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_togSM <- train(x_togSM,y_togSM,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_togSM <- predict(NB_togSM, newdata = Data2007to2013_tog)
confusionMatrix(predict_togSM, Data2007to2013_tog$Drafted)
CheckList_togSM = cbind.data.frame(Data2007to2013_tog$Drafted,predict_togSM)
names(CheckList_togSM)[names(CheckList_togSM)=="Data2007to2013_tog$Drafted"] <- "Y"
names(CheckList_togSM)[names(CheckList_togSM)=="predict_togSM"] <- "Pred"
CheckList_togSM = CheckList_togSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[5,"Together_TP"] = sum(CheckList_togSM$TP)
NaiveBayesPerfMeas[5,"Together_TN"] = sum(CheckList_togSM$TN)
NaiveBayesPerfMeas[5,"Together_FP"] = sum(CheckList_togSM$FP)
NaiveBayesPerfMeas[5,"Together_FN"] = sum(CheckList_togSM$FN)
# For testing data
predict_togSMTest <- predict(NB_togSM,CleanClass2014_3_tog)
CheckList_togSMTest = cbind.data.frame(CleanClass2014_3_tog$Drafted,predict_togSMTest)
names(CheckList_togSMTest)[names(CheckList_togSMTest)=="CleanClass2014_3_tog$Drafted"] <- "Y"
names(CheckList_togSMTest)[names(CheckList_togSMTest)=="predict_togSMTest"] <- "Pred"
CheckList_togSMTest = CheckList_togSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[5,"Together_TP"] = sum(CheckList_togSMTest$TP)
NaiveBayesPerfMeasTest[5,"Together_TN"] = sum(CheckList_togSMTest$TN)
NaiveBayesPerfMeasTest[5,"Together_FP"] = sum(CheckList_togSMTest$FP)
NaiveBayesPerfMeasTest[5,"Together_FN"] = sum(CheckList_togSMTest$FN)
# II. Naive Bayes Classifier - 07 to 13, QB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_QBSM <- cleanData_smote[cleanData_smote$Position=="QB", ]
Data2007to2013_QBSM <- Data2007to2013_QBSM %>% select(-Position, -Name, -Player.Code, -Year,
-Safety, -Kickoff.Ret.TD, -Punt.Ret.TD) #these variables have zero variance
# Testing data
CleanClass2014_3_QB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_QBSM <- setdiff(names(Data2007to2013_QBSM), "Drafted")
x_QBSM <- Data2007to2013_QBSM[,features_QBSM]
y_QBSM <- Data2007to2013_QBSM$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_QBSM <- train(x_QBSM,y_QBSM,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions for training data (2007 to 2013): 0.5 is used for probability cutoff value by default
predict_QBSM <- predict(NB_QBSM, newdata = Data2007to2013_QB)
confusionMatrix(predict_QBSM, Data2007to2013_QB$Drafted)
CheckList_QBSM = cbind.data.frame(Data2007to2013_QB$Drafted,predict_QBSM)
names(CheckList_QBSM)[names(CheckList_QBSM)=="Data2007to2013_QB$Drafted"] <- "Y"
names(CheckList_QBSM)[names(CheckList_QBSM)=="predict_QBSM"] <- "Pred"
CheckList_QBSM = CheckList_QBSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[5,"QB_TP"] = sum(CheckList_QBSM$TP)
NaiveBayesPerfMeas[5,"QB_TN"] = sum(CheckList_QBSM$TN)
NaiveBayesPerfMeas[5,"QB_FP"] = sum(CheckList_QBSM$FP)
NaiveBayesPerfMeas[5,"QB_FN"] = sum(CheckList_QBSM$FN)
# For testing data
predict_QBSMTest <- predict(NB_QBSM,CleanClass2014_3_QB)
CheckList_QBSMTest = cbind.data.frame(CleanClass2014_3_QB$Drafted,predict_QBSMTest)
names(CheckList_QBSMTest)[names(CheckList_QBSMTest)=="CleanClass2014_3_QB$Drafted"] <- "Y"
names(CheckList_QBSMTest)[names(CheckList_QBSMTest)=="predict_QBSMTest"] <- "Pred"
CheckList_QBSMTest = CheckList_QBSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[5,"QB_TP"] = sum(CheckList_QBSMTest$TP)
NaiveBayesPerfMeasTest[5,"QB_TN"] = sum(CheckList_QBSMTest$TN)
NaiveBayesPerfMeasTest[5,"QB_FP"] = sum(CheckList_QBSMTest$FP)
NaiveBayesPerfMeasTest[5,"QB_FN"] = sum(CheckList_QBSMTest$FN)
# III. Naive Bayes Classifier - 07 to 13, WR ----------
#1 - Preparations ----------
# Training data
Data2007to2013_WRSM <- cleanData_smote[cleanData_smote$Position=="WR", ]
Data2007to2013_WRSM <- Data2007to2013_WRSM %>% select(-Position, -Name, -Player.Code, -Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_WR
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_WRSM <- setdiff(names(Data2007to2013_WRSM), "Drafted")
x_WRSM <- Data2007to2013_WRSM[,features_WRSM]
y_WRSM <- Data2007to2013_WRSM$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_WRSM <- train(x_WRSM,y_WRSM,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_WRSM <- predict(NB_WRSM, newdata = Data2007to2013_WR)
confusionMatrix(predict_WRSM, Data2007to2013_WR$Drafted)
CheckList_WRSM = cbind.data.frame(Data2007to2013_WR$Drafted,predict_WRSM)
names(CheckList_WRSM)[names(CheckList_WRSM)=="Data2007to2013_WR$Drafted"] <- "Y"
names(CheckList_WRSM)[names(CheckList_WRSM)=="predict_WRSM"] <- "Pred"
CheckList_WRSM = CheckList_WRSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[5,"WR_TP"] = sum(CheckList_WRSM$TP)
NaiveBayesPerfMeas[5,"WR_TN"] = sum(CheckList_WRSM$TN)
NaiveBayesPerfMeas[5,"WR_FP"] = sum(CheckList_WRSM$FP)
NaiveBayesPerfMeas[5,"WR_FN"] = sum(CheckList_WRSM$FN)
# For testing data
predict_WRSMTest <- predict(NB_WRSM,CleanClass2014_3_WR)
CheckList_WRSMTest = cbind.data.frame(CleanClass2014_3_WR$Drafted,predict_WRSMTest)
names(CheckList_WRSMTest)[names(CheckList_WRSMTest)=="CleanClass2014_3_WR$Drafted"] <- "Y"
names(CheckList_WRSMTest)[names(CheckList_WRSMTest)=="predict_WRSMTest"] <- "Pred"
CheckList_WRSMTest = CheckList_WRSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[5,"WR_TP"] = sum(CheckList_WRSMTest$TP)
NaiveBayesPerfMeasTest[5,"WR_TN"] = sum(CheckList_WRSMTest$TN)
NaiveBayesPerfMeasTest[5,"WR_FP"] = sum(CheckList_WRSMTest$FP)
NaiveBayesPerfMeasTest[5,"WR_FN"] = sum(CheckList_WRSMTest$FN)
# IV. Naive Bayes Classifier - 07 to 13, RB ----------
#1 - Preparations ----------
# Training data
Data2007to2013_RBSM <- cleanData_smote[cleanData_smote$Position=="RB", ]
Data2007to2013_RBSM <- Data2007to2013_RBSM %>% select(-Position, -Name, -Player.Code,-Year,
-Safety) #these variables have zero variance
# Testing data
CleanClass2014_3_RB
#2 - Naive Bayes ----------
# Define features (x) and target (y)
features_RBSM <- setdiff(names(Data2007to2013_RBSM), "Drafted")
x_RBSM <- Data2007to2013_RBSM[,features_RBSM]
y_RBSM <- Data2007to2013_RBSM$Drafted
# Training a naive bayes model with 10-fold cross validation
set.seed(6969)
NB_RBSM <- train(x_RBSM,y_RBSM,method = "nb",trControl=trainControl(method='cv',number=10))
# Predictions: 0.5 is used for probability cutoff value by default
predict_RBSM <- predict(NB_RBSM, newdata = Data2007to2013_RB)
confusionMatrix(predict_RBSM, Data2007to2013_RB$Drafted)
CheckList_RBSM = cbind.data.frame(Data2007to2013_RB$Drafted,predict_RBSM)
names(CheckList_RBSM)[names(CheckList_RBSM)=="Data2007to2013_RB$Drafted"] <- "Y"
names(CheckList_RBSM)[names(CheckList_RBSM)=="predict_RBSM"] <- "Pred"
CheckList_RBSM = CheckList_RBSM %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
# Performance Measurement
NaiveBayesPerfMeas[5,"RB_TP"] = sum(CheckList_RBSM$TP)
NaiveBayesPerfMeas[5,"RB_TN"] = sum(CheckList_RBSM$TN)
NaiveBayesPerfMeas[5,"RB_FP"] = sum(CheckList_RBSM$FP)
NaiveBayesPerfMeas[5,"RB_FN"] = sum(CheckList_RBSM$FN)
# For testing data
predict_RBSMTest <- predict(NB_RBSM,CleanClass2014_3_RB)
CheckList_RBSMTest = cbind.data.frame(CleanClass2014_3_RB$Drafted,predict_RBSMTest)
names(CheckList_RBSMTest)[names(CheckList_RBSMTest)=="CleanClass2014_3_RB$Drafted"] <- "Y"
names(CheckList_RBSMTest)[names(CheckList_RBSMTest)=="predict_RBSMTest"] <- "Pred"
CheckList_RBSMTest = CheckList_RBSMTest %>%
mutate(TP=ifelse(Y==Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(TN=ifelse(Y==Pred,ifelse(Pred==0,1,0),0)) %>%
mutate(FP=ifelse(Y!=Pred,ifelse(Pred==1,1,0),0)) %>%
mutate(FN=ifelse(Y!=Pred,ifelse(Pred==0,1,0),0))
NaiveBayesPerfMeasTest[5,"RB_TP"] = sum(CheckList_RBSMTest$TP)
NaiveBayesPerfMeasTest[5,"RB_TN"] = sum(CheckList_RBSMTest$TN)
NaiveBayesPerfMeasTest[5,"RB_FP"] = sum(CheckList_RBSMTest$FP)
NaiveBayesPerfMeasTest[5,"RB_FN"] = sum(CheckList_RBSMTest$FN)
# 6. Save NaiveBayesPerfMeas as a new dataset ###################################################
# Training
save(NaiveBayesPerfMeas, file="../Data/PerformanceMeasurement/NaiveBayesPerfMeas.Rdata")
# Testing
save(NaiveBayesPerfMeasTest, file="../Data/PerformanceMeasurement/NaiveBayesPerfMeasTest.Rdata")
<file_sep>/Data/READMEs/RM_PerformanceMeasurement.Rmd
---
title: "README for the performance measurement part"
author: "Group 2"
date: "`December, 2nd, 2019`"
output:
pdf_document:
fig_caption: yes
keep_tex: yes
html_document:
df_print: paged
abstract: This Chapter describes the evaluation of the best sampling and model combination, which is computed in the script 'PerformanceMeasurement.R'.
urlcolor: blue
---
## The goal of our performance measurement
After training a model it is important to check how well it performs. Since we trained not just one model but 120 in total (6 algorithms * 5 samplings * 4 positions), it is even more important to compare all the different models and see which one is the best. For our business case this means that we want to see which combination of method and sampling we should use for each position, or whether a single model trained on all positions together does even better.
## How to compare the method/sampling combinations for all positions
After training the models with 10-fold cross-validation, we applied them to all the (unsampled) data from 2007 to 2013 to obtain the true positives, true negatives, false positives and false negatives on the training set. At the end of every model script we save this information separately and bring it all together in the script 'PerformanceMeasurement.R'. At the same time (and in the same scripts) we also compute the testing fit on the 2014 testing data.
The first step is to bring them all into one dataframe so they are easier to work with. Then we make sure that we used the same, unsampled data by computing the sums of TP, TN, FP and FN for every method/sampling/position combination. In the CheckTibble we can then see that, for every position, the whole column always contains the same number, which confirms that this is the case.
Then we calculate the Accuracy, Precision, Recall and the F1 score:
$$\text{Accuracy} = \frac{\text{Correct Classifications}}{\text{All Classifications}} = \frac{TP + TN}{TP+TN+FP+FN}$$
$$\text{Precision (Positive Predictive Value)} = \frac{TP}{TP+FP}$$
$$\text{Recall (Sensitivity)} = \frac{TP}{TP+FN}$$
$$\text{F1 score (harmonic mean of precision and recall)} = 2 \cdot \frac{\text{Precision} \cdot \text{Recall}}{\text{Precision} + \text{Recall}}$$
The result is a table showing the Accuracy, Precision, Recall and F1 scores for all 120 model/sampling/position combinations. We decided to use the F1 score as the model selection criterion because it is more sensitive to the imbalance between drafted and undrafted players in the target variable. Therefore we only visualize the F1 score and the accuracy of all the models on the testing set. This table is quite big, but it is still interesting to look at to see how well each combination performs.
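A minimal sketch of how these measures can be derived from the counts (assuming a dataframe `PerfMeas` with generic `TP`, `TN`, `FP` and `FN` columns; the actual script works with position-specific columns such as `QB_TP`):
```{r, eval=FALSE}
library(dplyr)
# Illustrative only: derive the four measures from confusion-matrix counts
PerfMeas <- PerfMeas %>%
  mutate(Accuracy  = (TP + TN) / (TP + TN + FP + FN),
         Precision = TP / (TP + FP),
         Recall    = TP / (TP + FN),
         F1        = 2 * Precision * Recall / (Precision + Recall))
```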
\newpage
```{r echo=FALSE, include=FALSE}
library(tidyverse)
load("../PerformanceMeasurement/BestModels14.Rdata")
load("../PerformanceMeasurement/PerfMeasAllModels14.Rdata")
PerfMeasTibble14_F1 = PerfMeasTibble14 %>%
select("Method","Sampling","QB_F1", "WR_F1", "RB_F1", "Together_F1")
PerfMeasTibble14_Acc = PerfMeasTibble14 %>%
select("Method","Sampling","QB_Acc", "WR_Acc", "RB_Acc", "Together_Acc")
```
```{r echo = FALSE}
knitr::kable(PerfMeasTibble14_F1, caption = "F1 score by Model/Position and Sampling on 2014 unsampled testing data", digits=4)
knitr::kable(PerfMeasTibble14_Acc, caption = "Accuracy by Model/Position and Sampling on 2014 unsampled testing data", digits=4)
```
We can see two tendencies in the models:
* Mostly better performance with unsampled data than with sampled data
* Mostly better performance when we split the positions manually
The following four plots combine the information of both tibbles, showing the accuracy and the F1 score for every sampling method and every position separately. The horizontal lines show the no-information rate for the accuracy, i.e. how a model would perform when predicting only 0's.
```{r, echo = FALSE, out.width="95%"}
library(knitr)
include_graphics("Plots_8.jpg")
```
\newpage
## Our best models
Now let's have a look at the best model/sampling combination for every position:
```{r echo = FALSE}
knitr::kable(ResultTibble14, caption = "The best model/sampling combinations by position", digits=4)
```
As we can see, measured by the F1 score, the artificial neural networks perform best for QBs, RBs and all positions together. For the WRs it is the random forest trained on the rose-both-sampled data.
## Discussion
Our models, with accuracies of up to 91.8%, seem very good at first sight. But we have to keep in mind why we filtered out the players with fewer than 10 played games and why we sampled the data. Here we applied the models to the unsampled but filtered data of 2014, in which only 7.01% of the players were drafted. This means that a model predicting "not drafted" for every player would still perform better, since it would have an accuracy of 92.99% on the whole data set (= 'Together'). In other words, our models all perform below the 'no information rate', which means they are not actually that good.
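A quick way to check this baseline (a sketch, using the unsampled 2014 frame `CleanClass2014_3_tog` from the model scripts):
```{r, eval=FALSE}
# Illustrative only: class shares in the 2014 test data; the proportion of 0's
# equals the accuracy of a model that always predicts "not drafted".
prop.table(table(CleanClass2014_3_tog$Drafted))
```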
Interpreting models is always difficult, but the following thoughts are quite likely to be true, judging from the TP/TN/FP/FN counts. Our models are reasonably good at assigning a low draft probability to players who did not perform well in college football. They probably also do reasonably well at predicting that outstanding players, who almost have to be picked (and are probably picked early in the draft), will be drafted. But there is much room for improvement for all the players who performed well in college and might or might not be drafted. In other words, the models can predict the more or less obvious drafts and non-drafts, but are not much better than random for the interesting cases.
We would like to close the circle back to one of our first lessons in machine learning, in which we were taught the following very high-level formula for models (the right part). Y denotes the true outcome (in our case whether a player is drafted or not), f(X) is the true pattern that describes it, and $\epsilon$ is the noise, which appears to be random. With our models (the left part) we try to predict a $\hat{Y}$, which should be as close as possible to the true Y.
$$\hat{f}(X) = \hat{Y} \longrightarrow Y = f(X) + \varepsilon$$
Looking at this relation, we can think of three possible reasons why our models make so many mistakes:
* Our models $\hat{f_i}(X)$ do not include enough variables and/or are not sophisticated enough
* The data is not good enough
* The NFL draft contains a pretty large $\varepsilon$
Variables that are certainly missing from the model are the ones that are not easily quantifiable, such as game intelligence, strength of the player's own team, strength of the opponents, maturity of the player, and negative factors such as criminality, drug consumption and other problematic behaviour. In the past it has happened again and again that players with great game statistics and great game intelligence, who were expected to be drafted in the first round, fell far behind or were not drafted at all because pictures of them consuming marijuana were published.
<file_sep>/Data/READMEs/RM_DataHandling.Rmd
---
title: "README for the data preparation part"
author: "Group 2"
date: "`December, 2nd, 2019`"
output:
pdf_document:
fig_caption: yes
keep_tex: yes
html_document:
df_print: paged
abstract: This Chapter describes the process from raw to clean data, as well as
the data sources. In other words it explains the scripts 'DataCleaning2.R' and 'functionGetCleanClass2.R' and therefore also the function 'getCleanClass2'.
urlcolor: blue
---
## Description of the Goal
We want to predict whether a college football Quarterback (QB), Running Back (RB) or Wide Receiver (WR) will be drafted into the NFL or not. For this purpose we combine data from different sources. This starts with game data from college football and is extended with further information such as the NFL Combine or the Pro Day. Since we want to apply supervised learning, we also need a 'Y', which contains the information whether a player was drafted or not.
## Data Source
Most of our data was uploaded to Kaggle some years ago (<https://www.kaggle.com/mhixon/college-football-statistics>), but no relevant scripts based on it that try to predict the NFL Draft exist. These datasets contain much more information about college football than we need. We only use the following data sets for all the years:
* player-game-statistics.csv
+ One observation in these files contains the information about one player in one game.
* player.csv
+ One observation in these files contains information about height, weight, schools etc. of one player
The best data set (in terms of length) about the NFL Combine and the Pro Day that we found is also from Kaggle (<https://www.kaggle.com/kbanta11/nfl-combine>). Unfortunately it turned out that, compared to the college data, not even 10% of the players have accessible Combine/Pro Day data, which is why we took these variables out again. Otherwise too many cells would contain NA and could not be analyzed with all the algorithms we want to use.
The third data set we integrate is from Pro Football Reference (<https://www.pro-football-reference.com/play-index/draft-finder.cgi?>). It contains the information about the NFL drafts of the last couple of years. In terms of return on investment, the most reasonable way to obtain the data was to filter the years 2005 to 2019 and the positions QB, WR, RB, and then keep only the columns "Year", "Rnd", "Pick", "Player", "Pos", "Tm", "College.Univ". To reproduce it: set the named filters -> Get Results -> Share & More -> Modify & Share table -> remove all other variables we don't need -> comma-separated (in the yellow box) -> copy-paste the data into a new .txt file.
## The data cleaning process
In order to clean the data quickly and with only a few manual steps, we built a function that provides us with the clean data sets in the format we want. This function is called getCleanClass2 and is coded in the file 'functionGetCleanClass2.R'.
### Function 'getCleanClass2'
The function getCleanClass2 needs the inputs 'draftyear' (just the year number), the player-game-statistics from the two years before the draft, the player lists of the two previous years (both from the first source on Kaggle) and the draft dataset (from the third source). Its output is a table with the information about one single draft year. The following plain-text explanations summarize the steps; if you want more details, please see the comments in the file 'functionGetCleanClass2.R'.
getCleanClass2 first drops all the variables that are irrelevant for QB, RB and WR and removes observations that don't contain any results in a game (e.g. 0's in every cell of a row). It adds a column called 'Games.Played', which shows how many games were played to reach the other, summed-up results. After that, information about the players is matched to the obtained data. Then the most important column is matched: the target value called 'Drafted', which is 0 if a player is not drafted and 1 if he is. Unfortunately, the draft information is not available with the player code that is used for the earlier matching, which means that the match has to be done by name. This can result in some mistakes which cannot be avoided. Then duplicates, non-matchable players and variables that appear twice after matching are removed.
After these steps, the dataframe will contain four parts in every observation:
* Col 1 - 5: Information about the Player
* Col 6: Our Y called 'Drafted'
* Col 7 - 30: The summed game statistics of the previous year
* Col 31 - 54: The summed game statistics of two years prior to the draft
The next steps separate these parts and group the two years together, in order to obtain a dataframe with 30 variables containing the game stats of both years combined. This has the advantage that players that could only be matched to the year before the draft can still be analyzed.
### Computing the clean data
In the script 'DataCleaning2.R' the function 'getCleanClass2' is applied to all the available years of data to obtain a dataframe for every year. In the last part, all these dataframes are rbind-ed together and cleaned of duplicates, as sketched below. This overall procedure allows us to first obtain all eligible players and then keep only the latest information about players who played a senior year (those who were drafted in their junior year appear with their junior year).
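A minimal sketch of that last part (the object names are placeholders; the real calls live in 'DataCleaning2.R'):
```{r, eval=FALSE}
library(dplyr)
# Illustrative only: one data frame per draft year, each produced by getCleanClass2()
yearlyFrames <- list(CleanClass2008, CleanClass2009, CleanClass2010)  # ... one per year
CleanClass_all <- do.call(rbind, yearlyFrames) %>%
  arrange(desc(Year)) %>%
  distinct(Player.Code, .keep_all = TRUE)  # keep only a player's latest draft-class year
```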
### Validation of the Data and further cleaning
Just having a look at the different rows (= players) can already help to see whether the data seems right or not. Most of the data seems plausible, but, for example, the QB Jimmy Garoppolo does not have his full performance in our data.
```{r, include=FALSE}
library(tidyverse)
JimmyTibble = data.frame(Drafted=c(1,1), Pass.Att = c(127,1108), Pass.Comp=c(80,706), Pass.Yards = c(1036,8873), Pass.TD = c(10, 84), Games.Played = c(3,26), row.names = c("Our Data", "Wikipedia"))
load("../CleanData/CleanClass2007to2014_2.Rdata")
Games.Played_DraftedPlayers = CleanClass2007to2014_2$Games.Played[CleanClass2007to2014_2$Drafted==1]
Games.Played_AllPlayers = CleanClass2007to2014_2$Games.Played
```
```{r echo = FALSE}
knitr::kable(JimmyTibble, caption = "Comparison of Jimmy Garoppolo's performance")
```
Unfortunately, the missing 23 games could not be found anywhere in the input data, which means that there must be some errors in the data. Another way to look at this issue is to compare the histogram of "Games.Played" for all players with the histogram for the drafted ones.
```{r, fig.height=3, echo=FALSE}
hist(Games.Played_AllPlayers)
hist(Games.Played_DraftedPlayers)
```
As we can see in the histograms, the majority of players played fewer than 5 games (at least according to our data) and some of them were drafted. It seems very unlikely that a player is drafted if he played fewer than 5 games in the two seasons prior to the draft. Therefore it is very likely that all drafted players with fewer than 5 played games are mistakes. As the second histogram shows, no drafted player played between 5 and 9 games. Looking at our business case, we decided to eliminate all players with fewer than 10 played games from the data. This removes the errors as well as players that a priori have no chance of being drafted.
The new data frame is called 'CleanClass2007to2014_3.Rdata'.
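A minimal sketch of this filter step (assuming it is applied to the previous dataframe with dplyr; the actual step lives in the cleaning script):
```{r, eval=FALSE}
library(dplyr)
# Illustrative only: keep players with at least 10 games over both seasons
CleanClass2007to2014_3 <- CleanClass2007to2014_2 %>%
  filter(Games.Played >= 10)
```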
### Sampling the data
Because our target value is distributed very unequally (see table below), we applied four different sampling methods to address this issue. The methods are called 'oversampling', 'undersampling', 'rose both' and 'smote'. We then cross-validate the different data sets in all the models to find the best data to use for training the optimal model.
```{r, include=FALSE}
DataComp = data.frame(Observations=c(6372,3008,4022,654,2338,4221), Drafted_Ratio = c("06.24%","12.43%","50.00%","50.00%","50.81%","52.35%"), row.names = c("Before filter","No Sampling", "Oversampling", "Undersampling", "Rose Both", "Smote"))
```
```{r echo = FALSE}
knitr::kable(DataComp, caption = "Number of observations and Ratio of Drafted Players in the Dataframes")
```
As we can see, applying the filter Games.Played >= 10 already decreases the imbalance of drafted vs. undrafted players by dismissing players whose chance of being drafted is close to zero or whose data contains errors. By sampling the data we can make the distribution close to fifty/fifty, but whether this really improves our predictions needs to be cross-validated with the models. Since the filter is meant to dismiss the errors, we only continue with the filtered data (one set unsampled and four sampled).
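As a rough illustration of how a SMOTE-sampled training set can be created (a sketch only, assuming the DMwR package and placeholder percentages; the project's own SMOTE script defines the actual parameters):
```{r, eval=FALSE}
library(DMwR)
# Illustrative only: synthetically oversample the minority class (drafted players)
# and undersample the majority class; the percentages here are placeholders.
CleanClass2007to2013_3$Drafted <- as.factor(CleanClass2007to2013_3$Drafted)
cleanData_smote <- SMOTE(Drafted ~ ., data = as.data.frame(CleanClass2007to2013_3),
                         perc.over = 200, perc.under = 200)
table(cleanData_smote$Drafted)
```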
## The clean data
After all the described processes, the five dataframes (one unsampled and four sampled) contain the following variables:
* Player.Code: A unique Number for matching the data
* Name: Name of the Player
* Class: A factor showing the college year the player was in when entering the draft class, with levels JR = Junior (3rd year) and SR = Senior (4th year)
* Position: A factor with the position of the player (filtered to QB = Quarterback, RB = Running Back, WR = Wide Receiver only)
* Year: Shows the year the player was in the draft class
* Drafted: The target, which is 1 when a player was drafted and 0 when a player was not drafted
* Rush.Att: Summed rushing attempts over both seasons (mainly for RB)
* Rush.Yard: Summed rushing yards over both seasons (mainly for RB)
* Rush.TD: Summed rushing TD over both seasons (mainly for RB)
* Pass.Att: Summed passing attempts over both seasons (mainly for QB)
* Pass.Comp: Summed passing completions over both seasons (mainly for QB)
* Pass.Yard: Summed passing yards over both seasons (mainly for QB)
* Pass.TD: Summed passing TD over both seasons (mainly for QB)
* Pass.Int: Summed interceptions thrown over both seasons (mainly for QB)
* Pass.Conv: Summed 2-pt conversions thrown over both seasons (mainly for QB)
* Rec: Summed receptions over both seasons (mainly for WR)
* Rec.Yards: Summed reception yards over both seasons (mainly for WR)
* Rec.TD: Summed reception TD over both seasons (mainly for WR)
* Kickoff.Ret: Summed Kickoff returns over both seasons (mainly for WR/RB)
* Kickoff.Ret.Yard: Summed Kickoff return yards over both seasons (mainly for WR/RB)
* Kickoff.Ret.TD: Summed Kickoff return TD over both seasons (mainly for WR/RB)
* Punt.Ret: Summed punt returns over both seasons (mainly for WR/RB)
* Punt.Ret.Yard: Summed punt return yards over both seasons (mainly for WR/RB)
* Punt.Ret.TD: Summed punt return TD over both seasons (mainly for WR/RB)
* Off.2XP.Att: Summed 2 point conversion attempts over both seasons
* Off.2XP.Made: Summed 2 point conversions made over both seasons
* Safety: Being tackled in the own end zone, summed over both seasons (= 2 pts for the opponent)
* Fumble: Dropped balls summed over both seasons
* Fumble.Lost: Dropped balls recovered by opponent summed over both seasons
* Games.Played: Number of games played over both seasons (filter applied: >= 10)
<file_sep>/Project_Scripts/Data_Sampling_ROSE.R
rm(list=ls())
graphics.off()
# Libraries
library(ROSE) # Random Over-Sampling Examples:
# It involves the creation of a new data set by suitably resampling the observations
# belonging to the two classes. Function ovun.sample embeds consolidated resampling techniques
# to perform such a task and considers different sampling schemes. It is endowed with the argument
# method, which takes one value among "over","under" and "both".
# Load data
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
# Drop testing data (Year 2014)
CleanClass2007to2013_3 <- CleanClass2007to2014_3[CleanClass2007to2014_3$Year != 2014,]
# Class distribution
table(CleanClass2007to2013_3$Drafted)
# 1. Oversampling ------
# We randomly duplicate samples from the class with fewer instances, here class 1 (drafted), so as to match the number of samples in each class.
# While we avoid losing information with this approach, we also run the risk of overfitting our model, as we are more likely to get
# the same samples in the training data. This could lead to an overestimation of our model's performance and generalizability.
# In ROSE: Option "over" determines oversampling with replacement from the minority class, here class 1 (drafted), until
# the specified sample size N is reached. Since the prevalent class, here class 0, amounts to 2011 observations, to obtain a balanced
# sample by oversampling, we need to set the new sample size to 4022 (=2*2011). (Alternatively, we may design the oversampling
# by setting argument p, which represents the probability of the positive class in the new augmented sample. In this case,
# the proportion of positive examples will be only approximately equal to the specified p.)
CleanClass2007to2014_3_oversampling <- ovun.sample(Drafted~., data=CleanClass2007to2013_3, method="over",N=4022)
CleanClass2007to2014_3_oversampling <- as.data.frame(CleanClass2007to2014_3_oversampling$data)
table(CleanClass2007to2014_3_oversampling$Drafted)
save(CleanClass2007to2014_3_oversampling, file="../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
# 2. Undersampling ------
# We randomly select a subset of samples from the class with more instances, here class 0, to match the number of samples coming from
# each class. In our context, we randomly pick 327 out of the 2011 not drafted cases. The main disadvantage of undersampling is that
# we lose potentially relevant information from the left-out samples.
# In ROSE: Option "under" determines simple undersampling without replacement of the majority class, here class 0, until the specified
# sample size N is reached. Since the minority class, here class 1, amounts to 327 observations, to obtain a balanced sample by undersampling,
# we need to set the new sample size to 654 (=2*327). (Alternatively, we may design the undersampling by setting argument p, see explanation above).
CleanClass2007to2014_3_undersampling <- ovun.sample(Drafted~., data=CleanClass2007to2013_3, method="under" ,N=654)
CleanClass2007to2014_3_undersampling <- as.data.frame(CleanClass2007to2014_3_undersampling$data)
table(CleanClass2007to2014_3_undersampling$Drafted)
save(CleanClass2007to2014_3_undersampling, file="../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
# 3. Both; ROSE ------
# When option "both" is selected, both the minority class, here class 1, is oversampled with replacement and the majority class, here class 0, is
# undersampled (without replacement). In this case, both the arguments N and p have to be set to establish the amount of oversampling and undersampling.
# Essentially, the minority class is oversampled to reach a size determined as a realization of a binomial random variable with size N and probability p.
# Undersampling is then performed accordingly, to abide by the specified N.
CleanClass2007to2014_3_Rose.both <- ovun.sample(Drafted~., data=CleanClass2007to2013_3, method="both",
p=0.5, # probability of the minority class, by default 0.5.
seed=6969, # specify random seed
N=2338) # total specified sampled according to initial sample size of our train data
CleanClass2007to2014_3_Rose.both <- as.data.frame(CleanClass2007to2014_3_Rose.both$data)
table(CleanClass2007to2014_3_Rose.both$Drafted)
save(CleanClass2007to2014_3_Rose.both, file="../Data/CleanData/CleanClass2007to2013_3_Rose.both.Rdata")
<file_sep>/Project_Scripts/ANN.R
library(h2o)
library(tidyverse)
# init ----
# Create sigmoid function
sigmoid <- function(z){
out <- 1 / (1 + exp(-z))
return(out)
}
# Create standardization function
standFun <- function(x){
out <- (x - mean(x))/sd(x)
return(out)
}
# Prepare for between-model comparison of the training fit
# !!! ONLY RUN THIS ONCE !!!
ANNPerfMeas = data.frame(Method = character(), Sampling = character(),
QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(),
stringsAsFactors = FALSE)
ANNPerfMeas[1, 2] = "no_sampling"
ANNPerfMeas[2, 2] = "oversampling"
ANNPerfMeas[3, 2] = "undersampling"
ANNPerfMeas[4, 2] = "Rose_both"
ANNPerfMeas[5, 2] = "Smote"
ANNPerfMeas$Method = "ANN"
# Prepare for between-model comparison with 2014 as testing data
# !!! ONLY RUN THIS ONCE !!!
ANNPerfMeas2014 = data.frame(Method = character(), Sampling = character(),
QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(),
stringsAsFactors = FALSE)
ANNPerfMeas2014[1, 2] = "no_sampling"
ANNPerfMeas2014[2, 2] = "oversampling"
ANNPerfMeas2014[3, 2] = "undersampling"
ANNPerfMeas2014[4, 2] = "Rose_both"
ANNPerfMeas2014[5, 2] = "Smote"
ANNPerfMeas2014$Method = "ANN"
# Load cleaned data
# Uncomment one of the lines to use the respective sampling
load("../Data/CleanData/CleanClass2007to2014_3.RData") # no sampling, don't comment this line out!
# load("../Data/CleanData/CleanClass2007to2013_3_oversampling.RData") # oversampling
# load("../Data/CleanData/CleanClass2007to2013_3_undersampling.RData") # undersampling
# load("../Data/CleanData/CleanClass2007to2013_3_Rose.both.RData") # ROSE both
# load("../Data/CleanData/CleanClass2007to2013_3_smote.RData") # SMOTE
# Uncomment one of the lines to use the respective sampling
cleanData <- as_tibble(CleanClass2007to2014_3) # no sampling, don't comment this line out!
# cleanData_s <- as_tibble(CleanClass2007to2014_3_oversampling) # oversampling
# cleanData_s <- as_tibble(CleanClass2007to2014_3_undersampling) # undersampling
# cleanData_s <- as_tibble(CleanClass2007to2014_3_Rose.both) # ROSE both
# cleanData_s <- as_tibble(cleanData_smote) # SMOTE
# Define the ANN cost function
ANN_cost <- function(ANN_par, L_i_size, L_h_size, L_o_size, x, y, lambda){
# Separate the ANN_par matrix back into the two thetas
theta1 <- matrix(ANN_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2 <- matrix(ANN_par[(1 + (L_h_size * (L_i_size + 1))):length(ANN_par)], nrow = L_o_size)
# Prepare gradients to return to optimization function
J <- 0
theta1_grad <- matrix(0, nrow(theta1), ncol(theta1))
theta2_grad <- matrix(0, nrow(theta2), ncol(theta2))
# Create output matrix
y_new <- matrix(0, n, L_o_size)
# Map output vector to binary vector
for (i in 1:n) {
y_new[i, y[i]] <- 1
}
# Add column of 1s for bias
x <- cbind(1, x)
# Compute the output for the current thetas
H1 <- sigmoid(x %*% t(theta1))
H2 <- cbind(rep(1,n), H1)
H <- sigmoid(H2 %*% t(theta2))
  # Compute the regularised negative log-likelihood (cost) to be minimised
J <- vector()
for (i in 1:n){
J[i] <- (sum(-y_new[i,] %*% log(H[i,]) - (1-y_new[i,]) %*% log(1-H[i,]))) +
(lambda/(2*n)) * (sum(sum(theta1[,2:dim(theta1)[2] ]^2)) + sum(sum(theta2[,2:dim(theta2)[2]]^2)) )
}
  J <- sum(J)/n
  return(J)
}
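# For reference, the value computed above is the regularised cross-entropy cost
# (with a single output unit here):
#   J = (1/n) * sum_i [ -y_i * log(h_i) - (1 - y_i) * log(1 - h_i) ]
#       + (lambda / (2*n)) * ( sum(theta1[, -1]^2) + sum(theta2[, -1]^2) )
# i.e. the log-loss of the forward pass plus an L2 penalty on all weights except
# the bias columns. Note that ANN_cost() reads the sample size n from the global
# environment, so n must be defined before nlminb() calls it.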
# Define the function to calculate the gradient of the cost function
ANN_grad <- function(ANN_par, L_i_size, L_h_size, L_o_size, x, y, lambda){
# Separate the ANN_par matrix back into the thetas
theta1 <- matrix(ANN_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2 <- matrix(ANN_par[(1 + (L_h_size * (L_i_size + 1))):length(ANN_par)], nrow = L_o_size)
# Create placeholder matrices for the gradients
theta1_grad <- matrix(0, nrow(theta1), ncol(theta1))
theta2_grad <- matrix(0, nrow(theta2), ncol(theta2))
# Create output matrix
y_new <- matrix(0, n, L_o_size)
# Map y vector to binary vector
for (i in 1:n) {
y_new[i, y[i]] <- 1
}
# Add column of 1s to the input matrix for bias
x <- cbind(1, x)
# Create placeholders for gradient calculation
a_2 <- matrix(0, L_h_size, 1)
a_2 <- rbind( 1 , a_2)
a_3 <- matrix(0, n, L_o_size)
z_2 <- matrix(0, L_i_size, 1)
z_3 <- matrix(0, L_h_size, 1)
D1 <- matrix(0, L_h_size, L_i_size + 1)
D2 <- matrix(0, L_o_size, L_h_size + 1)
  # Accumulate the gradient contribution of every training example (backpropagation)
for (t in 1:n) {
a_1 <- x[t,]
z_2 <- theta1 %*% a_1
a_2 <- rbind(1, sigmoid(z_2))
z_3 <- theta2 %*% a_2
a_3 <- sigmoid(z_3)
delta_3 <- a_3 - y_new[t,]
tmp <- (t(theta2) %*% delta_3)
delta_2 <- tmp[2:length(tmp)] * sigmoid(z_2) * (1 - sigmoid(z_2))
D1 <- D1 + delta_2 %*% a_1
D2 <- D2 + delta_3 %*% t(a_2)
}
theta1_grad <- D1/n
theta1_grad[,2:ncol(theta1_grad)] <- theta1_grad[, 2:ncol(theta1_grad)] + (lambda/n) * theta1[, 2:ncol(theta1_grad)]
theta2_grad <- D2/n
theta2_grad[,2:ncol(theta2_grad)] <- theta2_grad[, 2:ncol(theta2_grad)] + (lambda/n) * theta2[, 2:ncol(theta2_grad)]
  grad <- c(as.vector(theta1_grad), as.vector(theta2_grad))
  return(grad)
}
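# Optional helper (a minimal sketch, not part of the original analysis): compare
# the analytic gradient from ANN_grad() against a central finite-difference
# approximation of ANN_cost(). Because both functions read n from the global
# environment, n (as well as x, y and the layer sizes) must be defined first.
check_ANN_grad <- function(ANN_par, L_i_size, L_h_size, L_o_size, x, y, lambda, eps = 1e-6) {
  analytic <- ANN_grad(ANN_par, L_i_size, L_h_size, L_o_size, x, y, lambda)
  numeric_grad <- numeric(length(ANN_par))
  for (k in seq_along(ANN_par)) {
    up <- ANN_par; up[k] <- up[k] + eps
    dn <- ANN_par; dn[k] <- dn[k] - eps
    numeric_grad[k] <- (ANN_cost(up, L_i_size, L_h_size, L_o_size, x, y, lambda) -
                        ANN_cost(dn, L_i_size, L_h_size, L_o_size, x, y, lambda)) / (2 * eps)
  }
  max(abs(analytic - numeric_grad))  # should be close to zero if the two agree
}
# Example call (uncomment once x, y, n and the layer sizes are defined below):
# check_ANN_grad(ANN_par, L_i_size, L_h_size, L_o_size, x, y, lambda = 1)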
# ANN for QB ----
# Select years 2007 through 2013 as training data
cleanData_QB <- cleanData %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_QB <- cleanData_s %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
# Drop unimportant variables
x <- as.matrix(cleanData_QB %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- as.matrix(cleanData_QB %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted))
y <- as.integer(as.vector(cleanData_QB$Drafted))
# Standardize the training data
for (i in 1:ncol(x)) {
x[,i] <- standFun(x[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x[is.na(x)] <- 0
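# (Columns that are constant in the training data have sd = 0, so standFun()
#  returns NaN for them, for example standFun(rep(0, 5)); is.na() also catches
#  NaN, so these columns are simply reset to zero.)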
# Parameters of ANN for QBs
L_i_size <- ncol(x)
L_h_size <- 10
L_o_size <- 1
n <- length(y)
theta1 <- matrix(1, nrow = L_h_size, ncol = L_i_size + 1)
theta2 <- matrix(1, nrow = L_o_size, ncol = L_h_size + 1)
ANN_par <- c(as.vector(theta1), as.vector(theta2))
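# Sanity check (illustrative): the packed parameter vector has one entry per
# connection, i.e. L_h_size*(L_i_size + 1) + L_o_size*(L_h_size + 1) values,
# matching how ANN_cost()/ANN_grad() unpack it back into theta1 and theta2.
stopifnot(length(ANN_par) == L_h_size * (L_i_size + 1) + L_o_size * (L_h_size + 1))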
# Make verbose to print every 5th iteration
options <- list(trace = 5)
# Optimize cost function
BP_pred <- nlminb(start = ANN_par,
objective = ANN_cost,
gradient = ANN_grad,
hessian = NULL,
L_i_size = L_i_size,
L_h_size = L_h_size,
L_o_size = L_o_size,
x = x, y = y,
lambda = 1,
control = options)
# Retrieve the theta vector found through backpropagation
BP_par <- BP_pred$par
# Separate theta vector from backpropagation into thetas
theta1_train <- matrix(data = BP_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2_train <- matrix(data = BP_par[(1 + (L_h_size * (L_i_size + 1))):length(BP_par)], nrow = L_o_size)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
x_uns <- as.matrix(D_uns %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# Standardize the unsampled training data
for (i in 1:ncol(x_uns)) {
x_uns[,i] <- standFun(x_uns[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x_uns[is.na(x_uns)] <- 0
a_1 <- rbind(1, t(x_uns))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
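# (Dimensions of the vectorised forward pass above: a_1 is (inputs+1) x n_obs,
#  a_2 is (hidden+1) x n_obs and a_3 is 1 x n_obs, so as.vector(a_3) yields one
#  predicted draft probability per player in D_uns.)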
train_QB <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = as.vector(a_3))
# Take the year 2014 as testing data
cleanData_QB_test <- cleanData %>% filter(., Position == "QB", Year == 2014) %>% drop_na(.)
x_test <- as.matrix(cleanData_QB_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
y_test <- pull(cleanData_QB_test %>% select(., Drafted))
# Standardize the testing data and replace NA with zero
for (i in 1:ncol(x_test)) {
x_test[,i] <- standFun(x_test[,i])
}
x_test[is.na(x_test)] <- 0
# Make predictions with the thetas found in backpropagation
a_1 <- rbind(1, t(x_test))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
# Summarize the results
pred_QB <- tibble("Code" = cleanData_QB_test$Player.Code, "Name" = cleanData_QB_test$Name, "pred" = as.vector(a_3), "Drafted" = cleanData_QB_test$Drafted)
# ANN for RB ----
# Select years 2007 through 2013 as training data
cleanData_RB <- cleanData %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_RB <- cleanData_s %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
# Drop unimportant variables
x <- as.matrix(cleanData_RB %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- as.matrix(cleanData_RB %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted))
y <- as.integer(as.vector(cleanData_RB$Drafted))
# Standardize the training data
for (i in 1:ncol(x)) {
x[,i] <- standFun(x[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x[is.na(x)] <- 0
# Parameters of ANN for RBs
L_i_size <- ncol(x)
L_h_size <- 10
L_o_size <- 1
n <- length(y)
theta1 <- matrix(1, nrow = L_h_size, ncol = L_i_size + 1)
theta2 <- matrix(1, nrow = L_o_size, ncol = L_h_size + 1)
ANN_par <- c(as.vector(theta1), as.vector(theta2))
# Make verbose to print every 5th iteration
options <- list(trace = 5)
# Optimize cost function
BP_pred <- nlminb(start = ANN_par,
objective = ANN_cost,
gradient = ANN_grad,
hessian = NULL,
L_i_size = L_i_size,
L_h_size = L_h_size,
L_o_size = L_o_size,
x = x, y = y,
lambda = 1,
control = options)
# Retrieve the theta vector found through backpropagation
BP_par <- BP_pred$par
# Separate theta vector from backpropagation into thetas
theta1_train <- matrix(data = BP_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2_train <- matrix(data = BP_par[(1 + (L_h_size * (L_i_size + 1))):length(BP_par)], nrow = L_o_size)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
x_uns <- as.matrix(D_uns %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# Standardize the unsampled training data
for (i in 1:ncol(x_uns)) {
x_uns[,i] <- standFun(x_uns[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x_uns[is.na(x_uns)] <- 0
a_1 <- rbind(1, t(x_uns))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
train_RB <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = as.vector(a_3))
# Take the year 2014 as testing data
cleanData_RB_test <- cleanData %>% filter(., Position == "RB", Year == 2014) %>% drop_na(.)
x_test <- as.matrix(cleanData_RB_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
y_test <- pull(cleanData_RB_test %>% select(., Drafted))
# Standardize the testing data and replace NA with zero
for (i in 1:ncol(x_test)) {
x_test[,i] <- standFun(x_test[,i])
}
x_test[is.na(x_test)] <- 0
# Make predictions with the thetas found in backpropagation
a_1 <- rbind(1, t(x_test))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
# Summarize the results
pred_RB <- tibble("Code" = cleanData_RB_test$Player.Code, "Name" = cleanData_RB_test$Name, "pred" = as.vector(a_3), "Drafted" = cleanData_RB_test$Drafted)
# ANN for WR ----
# Select years 2007 through 2013 as training data
cleanData_WR <- cleanData %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_WR <- cleanData_s %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
# Drop unimportant variables
x <- as.matrix(cleanData_WR %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- as.matrix(cleanData_WR %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted))
y <- as.integer(as.vector(cleanData_WR$Drafted))
# Standardize the training data
for (i in 1:ncol(x)) {
x[,i] <- standFun(x[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x[is.na(x)] <- 0
# Parameters of ANN for WRs
L_i_size <- ncol(x)
L_h_size <- 10
L_o_size <- 1
n <- length(y)
theta1 <- matrix(1, nrow = L_h_size, ncol = L_i_size + 1)
theta2 <- matrix(1, nrow = L_o_size, ncol = L_h_size + 1)
ANN_par <- c(as.vector(theta1), as.vector(theta2))
# Make verbose to print every 5th iteration
options <- list(trace = 5)
# Optimize cost function
BP_pred <- nlminb(start = ANN_par,
objective = ANN_cost,
gradient = ANN_grad,
hessian = NULL,
L_i_size = L_i_size,
L_h_size = L_h_size,
L_o_size = L_o_size,
x = x, y = y,
lambda = 1,
control = options)
# Retrieve the theta vector found through backpropagation
BP_par <- BP_pred$par
# Separate theta vector from backpropagation into thetas
theta1_train <- matrix(data = BP_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2_train <- matrix(data = BP_par[(1 + (L_h_size * (L_i_size + 1))):length(BP_par)], nrow = L_o_size)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
x_uns <- as.matrix(D_uns %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# Standardize the unsampled training data
for (i in 1:ncol(x_uns)) {
x_uns[,i] <- standFun(x_uns[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x_uns[is.na(x_uns)] <- 0
a_1 <- rbind(1, t(x_uns))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
train_WR <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = as.vector(a_3))
# Take the year 2014 as testing data
cleanData_WR_test <- cleanData %>% filter(., Position == "WR", Year == 2014) %>% drop_na(.)
x_test <- as.matrix(cleanData_WR_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
y_test <- pull(cleanData_WR_test %>% select(., Drafted))
# Standardize the testing data and replace NA with zero
for (i in 1:ncol(x_test)) {
x_test[,i] <- standFun(x_test[,i])
}
x_test[is.na(x_test)] <- 0
# Make predictions with the thetas found in backpropagation
a_1 <- rbind(1, t(x_test))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
# Summarize the results
pred_WR <- tibble("Code" = cleanData_WR_test$Player.Code, "Name" = cleanData_WR_test$Name, "pred" = as.vector(a_3), "Drafted" = cleanData_WR_test$Drafted)
# ANN for all positions ----
# Select years 2007 through 2013 as training data
cleanData_all <- cleanData %>% filter(., Year < 2014) %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_all <- cleanData_s %>% filter(., Year < 2014) %>% drop_na(.)
# Drop unimportant variables
x <- as.matrix(cleanData_all %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- as.matrix(cleanData_all %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted))
y <- as.integer(as.vector(cleanData_all$Drafted))
# Standardize the training data
for (i in 1:ncol(x)) {
x[,i] <- standFun(x[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x[is.na(x)] <- 0
# Parameters of ANN
L_i_size <- ncol(x)
L_h_size <- 10
L_o_size <- 1
n <- length(y)
theta1 <- matrix(1, nrow = L_h_size, ncol = L_i_size + 1)
theta2 <- matrix(1, nrow = L_o_size, ncol = L_h_size + 1)
ANN_par <- c(as.vector(theta1), as.vector(theta2))
# Make verbose to print every 5th iteration
options <- list(trace = 5)
# Optimize cost function
BP_pred <- nlminb(start = ANN_par,
objective = ANN_cost,
gradient = ANN_grad,
hessian = NULL,
L_i_size = L_i_size,
L_h_size = L_h_size,
L_o_size = L_o_size,
x = x, y = y,
lambda = 1,
control = options)
# Retrieve the theta vector found through backpropagation
BP_par <- BP_pred$par
# Separate theta vector from backpropagation into thetas
theta1_train <- matrix(data = BP_par[1:(L_h_size * (L_i_size + 1))], nrow = L_h_size)
theta2_train <- matrix(data = BP_par[(1 + (L_h_size * (L_i_size + 1))):length(BP_par)], nrow = L_o_size)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014) %>% drop_na(.)
x_uns <- as.matrix(D_uns %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
# Standardize the unsampled training data
for (i in 1:ncol(x_uns)) {
x_uns[,i] <- standFun(x_uns[,i])
}
# Replace variables that are consistently zero and therefore yield NA when standardized with zero again
x_uns[is.na(x_uns)] <- 0
a_1 <- rbind(1, t(x_uns))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
train_all <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = as.vector(a_3))
# Take the year 2014 as testing data
cleanData_all_test <- cleanData %>% filter(., Year == 2014) %>% drop_na(.)
x_test <- as.matrix(cleanData_all_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted))
y_test <- pull(cleanData_all_test %>% select(., Drafted))
# Standardize the testing data and replace NA with zero
for (i in 1:ncol(x_test)) {
x_test[,i] <- standFun(x_test[,i])
}
x_test[is.na(x_test)] <- 0
# Make predictions with the thetas found in backpropagation
a_1 <- rbind(1, t(x_test))
a_2 <- rbind(1, sigmoid(theta1_train %*% a_1))
a_3 <- sigmoid(theta2_train %*% a_2)
# Summarize the results
pred_all <- tibble("Code" = cleanData_all_test$Player.Code, "Name" = cleanData_all_test$Name, "pred" = as.vector(a_3), "Drafted" = cleanData_all_test$Drafted)
# Aggregate Results ----
resultsComb_separate <- tibble("Player.Code" = c(pred_QB$Code, pred_RB$Code, pred_WR$Code),
"Name" = c(pred_QB$Name, pred_RB$Name, pred_WR$Name),
"P" = c(pred_QB$pred, pred_RB$pred, pred_WR$pred),
"Pred" = ifelse(c(pred_QB$pred, pred_RB$pred, pred_WR$pred) >= 0.5, 1, 0),
"Drafted" = c(pred_QB$Drafted, pred_RB$Drafted, pred_WR$Drafted))
resultsComb_separate <- resultsComb_separate %>% mutate(., "error" = ifelse(Pred != Drafted, 1, 0),
"TP" = ifelse(Pred == Drafted & Drafted == 1, 1, 0),
"FP" = ifelse(Pred != Drafted & Drafted == 0, 1, 0),
"TN" = ifelse(Pred == Drafted & Drafted == 0, 1, 0),
"FN" = ifelse(Pred != Drafted & Drafted == 1, 1, 0))
resultsComb_all <- tibble("Player.Code" = pred_all$Code,
"Name" = pred_all$Name,
"P" = pred_all$pred,
"Pred" = ifelse(pred_all$pred >= 0.5, 1, 0),
"Drafted" = pred_all$Drafted)
resultsComb_all <- resultsComb_all %>% mutate(., "error" = ifelse(Pred != Drafted, 1, 0),
"TP" = ifelse(Pred == Drafted & Drafted == 1, 1, 0),
"FP" = ifelse(Pred != Drafted & Drafted == 0, 1, 0),
"TN" = ifelse(Pred == Drafted & Drafted == 0, 1, 0),
"FN" = ifelse(Pred != Drafted & Drafted == 1, 1, 0))
# !!! Note: row index has to be changed depending on the sampling used !!!
rowInd <- 1 # no sampling
# rowInd <- 2 # oversampling
# rowInd <- 3 # undersampling
# rowInd <- 4 # Rose_both
# rowInd <- 5 # SMOTE
# Fill in training fit data
ANNPerfMeas[rowInd, "QB_TP"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) == train_QB$Drafted & train_QB$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "QB_TN"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) == train_QB$Drafted & train_QB$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "QB_FP"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) != train_QB$Drafted & train_QB$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "QB_FN"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) != train_QB$Drafted & train_QB$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "RB_TP"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) == train_RB$Drafted & train_RB$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "RB_TN"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) == train_RB$Drafted & train_RB$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "RB_FP"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) != train_RB$Drafted & train_RB$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "RB_FN"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) != train_RB$Drafted & train_RB$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "WR_TP"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) == train_WR$Drafted & train_WR$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "WR_TN"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) == train_WR$Drafted & train_WR$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "WR_FP"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) != train_WR$Drafted & train_WR$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "WR_FN"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) != train_WR$Drafted & train_WR$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "Together_TP"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) == train_all$Drafted & train_all$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "Together_TN"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) == train_all$Drafted & train_all$Drafted == 0, 1, 0))
ANNPerfMeas[rowInd, "Together_FP"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) != train_all$Drafted & train_all$Drafted == 1, 1, 0))
ANNPerfMeas[rowInd, "Together_FN"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) != train_all$Drafted & train_all$Drafted == 0, 1, 0))
# Fill in testing fit data
ANNPerfMeas2014[rowInd, "QB_TP"] = sum(ifelse(ifelse(pred_QB$pred >= 0.5, 1, 0) == pred_QB$Drafted & pred_QB$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "QB_TN"] = sum(ifelse(ifelse(pred_QB$pred >= 0.5, 1, 0) == pred_QB$Drafted & pred_QB$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "QB_FP"] = sum(ifelse(ifelse(pred_QB$pred >= 0.5, 1, 0) != pred_QB$Drafted & pred_QB$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "QB_FN"] = sum(ifelse(ifelse(pred_QB$pred >= 0.5, 1, 0) != pred_QB$Drafted & pred_QB$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "RB_TP"] = sum(ifelse(ifelse(pred_RB$pred >= 0.5, 1, 0) == pred_RB$Drafted & pred_RB$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "RB_TN"] = sum(ifelse(ifelse(pred_RB$pred >= 0.5, 1, 0) == pred_RB$Drafted & pred_RB$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "RB_FP"] = sum(ifelse(ifelse(pred_RB$pred >= 0.5, 1, 0) != pred_RB$Drafted & pred_RB$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "RB_FN"] = sum(ifelse(ifelse(pred_RB$pred >= 0.5, 1, 0) != pred_RB$Drafted & pred_RB$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "WR_TP"] = sum(ifelse(ifelse(pred_WR$pred >= 0.5, 1, 0) == pred_WR$Drafted & pred_WR$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "WR_TN"] = sum(ifelse(ifelse(pred_WR$pred >= 0.5, 1, 0) == pred_WR$Drafted & pred_WR$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "WR_FP"] = sum(ifelse(ifelse(pred_WR$pred >= 0.5, 1, 0) != pred_WR$Drafted & pred_WR$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "WR_FN"] = sum(ifelse(ifelse(pred_WR$pred >= 0.5, 1, 0) != pred_WR$Drafted & pred_WR$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "Together_TP"] = sum(ifelse(ifelse(pred_all$pred >= 0.5, 1, 0) == pred_all$Drafted & pred_all$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "Together_TN"] = sum(ifelse(ifelse(pred_all$pred >= 0.5, 1, 0) == pred_all$Drafted & pred_all$Drafted == 0, 1, 0))
ANNPerfMeas2014[rowInd, "Together_FP"] = sum(ifelse(ifelse(pred_all$pred >= 0.5, 1, 0) != pred_all$Drafted & pred_all$Drafted == 1, 1, 0))
ANNPerfMeas2014[rowInd, "Together_FN"] = sum(ifelse(ifelse(pred_all$pred >= 0.5, 1, 0) != pred_all$Drafted & pred_all$Drafted == 0, 1, 0))
# Save the results for model comparison
save(ANNPerfMeas2014, file = "../Data/PerformanceMeasurement/ANNPerfMeas2014.Rdata")
save(ANNPerfMeas, file = "../Data/PerformanceMeasurement/ANNPerfMeas.Rdata")
<file_sep>/Project_Scripts/ClassificationTree.R
# Load required packages
library(dplyr) # data wrangling
library(rpart) # performing regression trees
library(rpart.plot) # plotting regression trees
library(tidyverse)
library(rattle) # Fancy tree plot
library(RColorBrewer) # Color selection for fancy tree plot
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_rose.both.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
ClassificationTreePerfMeas = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
ClassificationTreePerfMeas[1,2] = "no_sampling"
ClassificationTreePerfMeas[2,2] = "oversampling"
ClassificationTreePerfMeas[3,2] = "undersampling"
ClassificationTreePerfMeas[4,2] = "Rose_both"
ClassificationTreePerfMeas[5,2] = "Smote"
ClassificationTreePerfMeas$Method = "ClassificationTree"
# We repeat the next steps 5 times (e.g. "1. No Sampling" does the same thing as "2. Oversampling"), each time training the model on a differently sampled data set.
# In other words, this is how we compare the sampling methods against each other. We spell the steps out instead of wrapping them in a loop or a function
# so that the intermediate objects stay easily available in case we want to inspect or process them further.
# Part 6 is the testing of the models on the 2014 data.
## 1. No Sampling ------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainNS = CleanClass2007to2014_3 %>%
filter(Year != 2014)
DtestNS = CleanClass2007to2014_3 %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBNS = DtrainNS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBNS = DtestNS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBNS = rpart(
formula = Drafted ~ .,
data = DtrainQBNS,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBNS, DtrainQBNS)))
CheckListQBNS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"QB_TP"] = sum(CheckListQBNS$QB_TP)
ClassificationTreePerfMeas[1,"QB_TN"] = sum(CheckListQBNS$QB_TN)
ClassificationTreePerfMeas[1,"QB_FP"] = sum(CheckListQBNS$QB_FP)
ClassificationTreePerfMeas[1,"QB_FN"] = sum(CheckListQBNS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBNS, main="Classification Tree for QB's with unsampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRNS = DtrainNS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWRNS = DtestNS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRNS = rpart(
formula = Drafted ~ .,
data = DtrainWRNS,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRNS, DtrainWRNS)))
CheckListWRNS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"WR_TP"] = sum(CheckListWRNS$WR_TP)
ClassificationTreePerfMeas[1,"WR_TN"] = sum(CheckListWRNS$WR_TN)
ClassificationTreePerfMeas[1,"WR_FP"] = sum(CheckListWRNS$WR_FP)
ClassificationTreePerfMeas[1,"WR_FN"] = sum(CheckListWRNS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRNS, main="Classification Tree for WR's with unsampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of an RB being picked in the draft
DtrainRBNS = DtrainNS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBNS = DtestNS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBNS = rpart(
formula = Drafted ~ .,
data = DtrainRBNS,
method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBNS, DtrainRBNS)))
CheckListRBNS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"RB_TP"] = sum(CheckListRBNS$RB_TP)
ClassificationTreePerfMeas[1,"RB_TN"] = sum(CheckListRBNS$RB_TN)
ClassificationTreePerfMeas[1,"RB_FP"] = sum(CheckListRBNS$RB_FP)
ClassificationTreePerfMeas[1,"RB_FN"] = sum(CheckListRBNS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBNS, main="Classification Tree for RB's with unsampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together being picked in the draft
DtrainTogetherNS = DtrainNS %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherNS = DtestNS %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherNS = rpart(
formula = Drafted ~ .,
data = DtrainTogetherNS,
method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherNS, DtrainTogetherNS)))
CheckListTogetherNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"Together_TP"] = sum(CheckListTogetherNS$Together_TP)
ClassificationTreePerfMeas[1,"Together_TN"] = sum(CheckListTogetherNS$Together_TN)
ClassificationTreePerfMeas[1,"Together_FP"] = sum(CheckListTogetherNS$Together_FP)
ClassificationTreePerfMeas[1,"Together_FN"] = sum(CheckListTogetherNS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherNS, main="Classification Tree for QB/WR/RB together with unsampled data", sub="", cex=0.5)
## 2. Oversampling ------------------------
# Splitting the data
DtrainOS = CleanClass2007to2014_3_oversampling %>%
filter(Year != 2014)
DtestOS = CleanClass2007to2014_3_oversampling %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBOS = DtrainOS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBOS = DtestOS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBOS = rpart(
formula = Drafted ~ .,
data = DtrainQBOS,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBOS, DtrainQBNS)))
CheckListQBOS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"QB_TP"] = sum(CheckListQBOS$QB_TP)
ClassificationTreePerfMeas[2,"QB_TN"] = sum(CheckListQBOS$QB_TN)
ClassificationTreePerfMeas[2,"QB_FP"] = sum(CheckListQBOS$QB_FP)
ClassificationTreePerfMeas[2,"QB_FN"] = sum(CheckListQBOS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBOS, main="Classification Tree for QB's with oversampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWROS = DtrainOS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWROS = DtestOS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWROS = rpart(
formula = Drafted ~ .,
data = DtrainWROS,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWROS, DtrainWRNS)))
CheckListWROS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"WR_TP"] = sum(CheckListWROS$WR_TP)
ClassificationTreePerfMeas[2,"WR_TN"] = sum(CheckListWROS$WR_TN)
ClassificationTreePerfMeas[2,"WR_FP"] = sum(CheckListWROS$WR_FP)
ClassificationTreePerfMeas[2,"WR_FN"] = sum(CheckListWROS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWROS, main="Classification Tree for WR's with oversampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of an RB being picked in the draft
DtrainRBOS = DtrainOS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBOS = DtestOS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBOS = rpart(
formula = Drafted ~ .,
data = DtrainRBOS,
method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBOS, DtrainRBNS)))
CheckListRBOS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"RB_TP"] = sum(CheckListRBOS$RB_TP)
ClassificationTreePerfMeas[2,"RB_TN"] = sum(CheckListRBOS$RB_TN)
ClassificationTreePerfMeas[2,"RB_FP"] = sum(CheckListRBOS$RB_FP)
ClassificationTreePerfMeas[2,"RB_FN"] = sum(CheckListRBOS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBOS, main="Classification Tree for RB's with oversampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together being picked in the draft
DtrainTogetherOS = DtrainOS %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherOS = DtestOS %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherOS = rpart(
formula = Drafted ~ .,
data = DtrainTogetherOS,
method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherOS, DtrainTogetherNS)))
CheckListTogetherOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"Together_TP"] = sum(CheckListTogetherOS$Together_TP)
ClassificationTreePerfMeas[2,"Together_TN"] = sum(CheckListTogetherOS$Together_TN)
ClassificationTreePerfMeas[2,"Together_FP"] = sum(CheckListTogetherOS$Together_FP)
ClassificationTreePerfMeas[2,"Together_FN"] = sum(CheckListTogetherOS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherOS, main="Classification Tree for QB/WR/RB together with oversampled data", sub="", cex=0.5)
## 3. Undersampling ------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainUS = CleanClass2007to2014_3_undersampling %>%
filter(Year != 2014)
DtestUS = CleanClass2007to2014_3_undersampling %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBUS = DtrainUS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBUS = DtestUS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBUS = rpart(
formula = Drafted ~ .,
data = DtrainQBUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBUS, DtrainQBNS)))
CheckListQBUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"QB_TP"] = sum(CheckListQBUS$QB_TP)
ClassificationTreePerfMeas[3,"QB_TN"] = sum(CheckListQBUS$QB_TN)
ClassificationTreePerfMeas[3,"QB_FP"] = sum(CheckListQBUS$QB_FP)
ClassificationTreePerfMeas[3,"QB_FN"] = sum(CheckListQBUS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBUS, main="Classification Tree for QB's with undersampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRUS = DtrainUS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWRUS = DtestUS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRUS = rpart(
formula = Drafted ~ .,
data = DtrainWRUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRUS, DtrainWRNS)))
CheckListWRUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"WR_TP"] = sum(CheckListWRUS$WR_TP)
ClassificationTreePerfMeas[3,"WR_TN"] = sum(CheckListWRUS$WR_TN)
ClassificationTreePerfMeas[3,"WR_FP"] = sum(CheckListWRUS$WR_FP)
ClassificationTreePerfMeas[3,"WR_FN"] = sum(CheckListWRUS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRUS, main="Classification Tree for WR's with undersampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of an RB being picked in the draft
DtrainRBUS = DtrainUS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBUS = DtestUS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBUS = rpart(
formula = Drafted ~ .,
data = DtrainRBUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBUS, DtrainRBNS)))
CheckListRBUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"RB_TP"] = sum(CheckListRBUS$RB_TP)
ClassificationTreePerfMeas[3,"RB_TN"] = sum(CheckListRBUS$RB_TN)
ClassificationTreePerfMeas[3,"RB_FP"] = sum(CheckListRBUS$RB_FP)
ClassificationTreePerfMeas[3,"RB_FN"] = sum(CheckListRBUS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBUS, main="Classification Tree for RB's with undersampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together being picked in the draft
DtrainTogetherUS = DtrainUS %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherUS = DtestUS %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherUS = rpart(
formula = Drafted ~ .,
data = DtrainTogetherUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherUS, DtrainTogetherNS)))
CheckListTogetherUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"Together_TP"] = sum(CheckListTogetherUS$Together_TP)
ClassificationTreePerfMeas[3,"Together_TN"] = sum(CheckListTogetherUS$Together_TN)
ClassificationTreePerfMeas[3,"Together_FP"] = sum(CheckListTogetherUS$Together_FP)
ClassificationTreePerfMeas[3,"Together_FN"] = sum(CheckListTogetherUS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherUS, main="Classification Tree for QB/WR/RB together with undersampled data", sub="", cex=0.5)
## 4. Rose_Both------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainRO = CleanClass2007to2014_3_Rose.both %>%
filter(Year != 2014)
DtestRO = CleanClass2007to2014_3_Rose.both %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBRO = DtrainRO %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBRO = DtestRO %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBRO = rpart(
formula = Drafted ~ .,
data = DtrainQBRO,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBRO, DtrainQBNS)))
CheckListQBRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[4,"QB_TP"] = sum(CheckListQBRO$QB_TP)
ClassificationTreePerfMeas[4,"QB_TN"] = sum(CheckListQBRO$QB_TN)
ClassificationTreePerfMeas[4,"QB_FP"] = sum(CheckListQBRO$QB_FP)
ClassificationTreePerfMeas[4,"QB_FN"] = sum(CheckListQBRO$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBRO, main="Classification Tree for QB's with Rose Both sampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRRO = DtrainRO %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWRRO = DtestRO %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRRO = rpart(
formula = Drafted ~ .,
data = DtrainWRRO,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRRO, DtrainWRNS)))
CheckListWRRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[4,"WR_TP"] = sum(CheckListWRRO$WR_TP)
ClassificationTreePerfMeas[4,"WR_TN"] = sum(CheckListWRRO$WR_TN)
ClassificationTreePerfMeas[4,"WR_FP"] = sum(CheckListWRRO$WR_FP)
ClassificationTreePerfMeas[4,"WR_FN"] = sum(CheckListWRRO$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRRO, main="Classification Tree for WR's with Rose Both sampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of an RB being picked in the draft
DtrainRBRO = DtrainRO %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBRO = DtestRO %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBRO = rpart(
formula = Drafted ~ .,
data = DtrainRBRO,
method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBRO, DtrainRBNS)))
CheckListRBRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[4,"RB_TP"] = sum(CheckListRBRO$RB_TP)
ClassificationTreePerfMeas[4,"RB_TN"] = sum(CheckListRBRO$RB_TN)
ClassificationTreePerfMeas[4,"RB_FP"] = sum(CheckListRBRO$RB_FP)
ClassificationTreePerfMeas[4,"RB_FN"] = sum(CheckListRBRO$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBRO, main="Classification Tree for RB's with Rose Both sampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together being picked in the draft
DtrainTogetherRO = DtrainRO %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherRO = DtestRO %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherRO = rpart(
formula = Drafted ~ .,
data = DtrainTogetherRO,
method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherRO, DtrainTogetherNS)))
CheckListTogetherRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[4,"Together_TP"] = sum(CheckListTogetherRO$Together_TP)
ClassificationTreePerfMeas[4,"Together_TN"] = sum(CheckListTogetherRO$Together_TN)
ClassificationTreePerfMeas[4,"Together_FP"] = sum(CheckListTogetherRO$Together_FP)
ClassificationTreePerfMeas[4,"Together_FN"] = sum(CheckListTogetherRO$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherRO, main="Classification Tree for QB/WR/RB together with Rose Both sampled data", sub="", cex=0.5)
## 5. Smote------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainSM = cleanData_smote %>%
filter(Year != 2014)
DtestSM = cleanData_smote %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBSM = DtrainSM %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Position, Year))
DtestQBSM = DtestSM %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBSM = rpart(
formula = Drafted ~ .,
data = DtrainQBSM,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBSM, DtrainQBNS)))
CheckListQBSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[5,"QB_TP"] = sum(CheckListQBSM$QB_TP)
ClassificationTreePerfMeas[5,"QB_TN"] = sum(CheckListQBSM$QB_TN)
ClassificationTreePerfMeas[5,"QB_FP"] = sum(CheckListQBSM$QB_FP)
ClassificationTreePerfMeas[5,"QB_FN"] = sum(CheckListQBSM$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBSM, main="Classification Tree for QB's with smote sampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRSM = DtrainSM %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Position, Year))
DtestWRSM = DtestSM %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRSM = rpart(
formula = Drafted ~ .,
data = DtrainWRSM,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRSM, DtrainWRNS)))
CheckListWRSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[5,"WR_TP"] = sum(CheckListWRSM$WR_TP)
ClassificationTreePerfMeas[5,"WR_TN"] = sum(CheckListWRSM$WR_TN)
ClassificationTreePerfMeas[5,"WR_FP"] = sum(CheckListWRSM$WR_FP)
ClassificationTreePerfMeas[5,"WR_FN"] = sum(CheckListWRSM$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRSM, main="Classification Tree for WR's with smote sampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of an RB being picked in the draft
DtrainRBSM = DtrainSM %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Position, Year))
DtestRBSM = DtestSM %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBSM = rpart(
formula = Drafted ~ .,
data = DtrainRBSM,
method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBSM, DtrainRBNS)))
CheckListRBSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[5,"RB_TP"] = sum(CheckListRBSM$RB_TP)
ClassificationTreePerfMeas[5,"RB_TN"] = sum(CheckListRBSM$RB_TN)
ClassificationTreePerfMeas[5,"RB_FP"] = sum(CheckListRBSM$RB_FP)
ClassificationTreePerfMeas[5,"RB_FN"] = sum(CheckListRBSM$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBSM, main="Classification Tree for RB's with smote sampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together being picked in the draft
DtrainTogetherSM = DtrainSM %>%
select(-c(Player.Code, Name, Year))
DtestTogetherSM = DtestSM %>%
select(-c(Player.Code, Name, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherSM = rpart(
formula = Drafted ~ .,
data = DtrainTogetherSM,
method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherSM, DtrainTogetherNS)))
CheckListTogetherSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[5,"Together_TP"] = sum(CheckListTogetherSM$Together_TP)
ClassificationTreePerfMeas[5,"Together_TN"] = sum(CheckListTogetherSM$Together_TN)
ClassificationTreePerfMeas[5,"Together_FP"] = sum(CheckListTogetherSM$Together_FP)
ClassificationTreePerfMeas[5,"Together_FN"] = sum(CheckListTogetherSM$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherSM, main="Classification Tree for QB/WR/RB together with smote sampled data", sub="", cex=0.5)
# Save the tibble for the Performance Measurement separately
save(ClassificationTreePerfMeas, file = "../Data/PerformanceMeasurement/ClassificationTreePerfMeas.Rdata")
# Uncomment to save a Plot of a tree (and update the name!)
# savePlotToFile(file.name = "QBtreeNS.jpg")
# 6. Predicting the 2014 NFL Draft---------------
# This is the Testing!
# Create an empty tibble
ClassificationTreePerfMeas14 = data.frame(Method = character(), Sampling = character(), QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(), stringsAsFactors = FALSE)
ClassificationTreePerfMeas14[1,2] = "no_sampling"
ClassificationTreePerfMeas14[2,2] = "oversampling"
ClassificationTreePerfMeas14[3,2] = "undersampling"
ClassificationTreePerfMeas14[4,2] = "Rose_both"
ClassificationTreePerfMeas14[5,2] = "Smote"
ClassificationTreePerfMeas14$Method = "ClassificationTree"
# Unsampled 2014-----------------
# Unsampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBNS, DtestQBNS)))
CheckListQBNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"QB_TP"] = sum(CheckListQBNS$QB_TP)
ClassificationTreePerfMeas14[1,"QB_TN"] = sum(CheckListQBNS$QB_TN)
ClassificationTreePerfMeas14[1,"QB_FP"] = sum(CheckListQBNS$QB_FP)
ClassificationTreePerfMeas14[1,"QB_FN"] = sum(CheckListQBNS$QB_FN)
# Unsampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRNS, DtestWRNS)))
CheckListWRNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"WR_TP"] = sum(CheckListWRNS$WR_TP)
ClassificationTreePerfMeas14[1,"WR_TN"] = sum(CheckListWRNS$WR_TN)
ClassificationTreePerfMeas14[1,"WR_FP"] = sum(CheckListWRNS$WR_FP)
ClassificationTreePerfMeas14[1,"WR_FN"] = sum(CheckListWRNS$WR_FN)
# Unsampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBNS, DtestRBNS)))
CheckListRBNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"RB_TP"] = sum(CheckListRBNS$RB_TP)
ClassificationTreePerfMeas14[1,"RB_TN"] = sum(CheckListRBNS$RB_TN)
ClassificationTreePerfMeas14[1,"RB_FP"] = sum(CheckListRBNS$RB_FP)
ClassificationTreePerfMeas14[1,"RB_FN"] = sum(CheckListRBNS$RB_FN)
# Unsampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherNS, DtestTogetherNS)))
CheckListTogetherNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"Together_TP"] = sum(CheckListTogetherNS$Together_TP)
ClassificationTreePerfMeas14[1,"Together_TN"] = sum(CheckListTogetherNS$Together_TN)
ClassificationTreePerfMeas14[1,"Together_FP"] = sum(CheckListTogetherNS$Together_FP)
ClassificationTreePerfMeas14[1,"Together_FN"] = sum(CheckListTogetherNS$Together_FN)
# Oversampled 2014-----------------
# Oversampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBOS, DtestQBNS)))
CheckListQBOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"QB_TP"] = sum(CheckListQBOS$QB_TP)
ClassificationTreePerfMeas14[2,"QB_TN"] = sum(CheckListQBOS$QB_TN)
ClassificationTreePerfMeas14[2,"QB_FP"] = sum(CheckListQBOS$QB_FP)
ClassificationTreePerfMeas14[2,"QB_FN"] = sum(CheckListQBOS$QB_FN)
# Oversampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWROS, DtestWRNS)))
CheckListWROS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"WR_TP"] = sum(CheckListWROS$WR_TP)
ClassificationTreePerfMeas14[2,"WR_TN"] = sum(CheckListWROS$WR_TN)
ClassificationTreePerfMeas14[2,"WR_FP"] = sum(CheckListWROS$WR_FP)
ClassificationTreePerfMeas14[2,"WR_FN"] = sum(CheckListWROS$WR_FN)
# Oversampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBOS, DtestRBNS)))
CheckListRBOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"RB_TP"] = sum(CheckListRBOS$RB_TP)
ClassificationTreePerfMeas14[2,"RB_TN"] = sum(CheckListRBOS$RB_TN)
ClassificationTreePerfMeas14[2,"RB_FP"] = sum(CheckListRBOS$RB_FP)
ClassificationTreePerfMeas14[2,"RB_FN"] = sum(CheckListRBOS$RB_FN)
# Oversampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherOS, DtestTogetherNS)))
CheckListTogetherOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"Together_TP"] = sum(CheckListTogetherOS$Together_TP)
ClassificationTreePerfMeas14[2,"Together_TN"] = sum(CheckListTogetherOS$Together_TN)
ClassificationTreePerfMeas14[2,"Together_FP"] = sum(CheckListTogetherOS$Together_FP)
ClassificationTreePerfMeas14[2,"Together_FN"] = sum(CheckListTogetherOS$Together_FN)
# Undersampled 2014-----------------
# Undersampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBUS, DtestQBNS)))
CheckListQBUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"QB_TP"] = sum(CheckListQBUS$QB_TP)
ClassificationTreePerfMeas14[3,"QB_TN"] = sum(CheckListQBUS$QB_TN)
ClassificationTreePerfMeas14[3,"QB_FP"] = sum(CheckListQBUS$QB_FP)
ClassificationTreePerfMeas14[3,"QB_FN"] = sum(CheckListQBUS$QB_FN)
# Undersampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRUS, DtestWRNS)))
CheckListWRUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"WR_TP"] = sum(CheckListWRUS$WR_TP)
ClassificationTreePerfMeas14[3,"WR_TN"] = sum(CheckListWRUS$WR_TN)
ClassificationTreePerfMeas14[3,"WR_FP"] = sum(CheckListWRUS$WR_FP)
ClassificationTreePerfMeas14[3,"WR_FN"] = sum(CheckListWRUS$WR_FN)
# Undersampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBUS, DtestRBNS)))
CheckListRBUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"RB_TP"] = sum(CheckListRBUS$RB_TP)
ClassificationTreePerfMeas14[3,"RB_TN"] = sum(CheckListRBUS$RB_TN)
ClassificationTreePerfMeas14[3,"RB_FP"] = sum(CheckListRBUS$RB_FP)
ClassificationTreePerfMeas14[3,"RB_FN"] = sum(CheckListRBUS$RB_FN)
# Undersampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherUS, DtestTogetherNS)))
CheckListTogetherUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"Together_TP"] = sum(CheckListTogetherUS$Together_TP)
ClassificationTreePerfMeas14[3,"Together_TN"] = sum(CheckListTogetherUS$Together_TN)
ClassificationTreePerfMeas14[3,"Together_FP"] = sum(CheckListTogetherUS$Together_FP)
ClassificationTreePerfMeas14[3,"Together_FN"] = sum(CheckListTogetherUS$Together_FN)
# Rose Both 2014-----------------
# Rose Both model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBRO, DtestQBNS)))
CheckListQBRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"QB_TP"] = sum(CheckListQBRO$QB_TP)
ClassificationTreePerfMeas14[4,"QB_TN"] = sum(CheckListQBRO$QB_TN)
ClassificationTreePerfMeas14[4,"QB_FP"] = sum(CheckListQBRO$QB_FP)
ClassificationTreePerfMeas14[4,"QB_FN"] = sum(CheckListQBRO$QB_FN)
# Rose Both model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRRO, DtestWRNS)))
CheckListWRRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"WR_TP"] = sum(CheckListWRRO$WR_TP)
ClassificationTreePerfMeas14[4,"WR_TN"] = sum(CheckListWRRO$WR_TN)
ClassificationTreePerfMeas14[4,"WR_FP"] = sum(CheckListWRRO$WR_FP)
ClassificationTreePerfMeas14[4,"WR_FN"] = sum(CheckListWRRO$WR_FN)
# Rose Both model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBRO, DtestRBNS)))
CheckListRBRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"RB_TP"] = sum(CheckListRBRO$RB_TP)
ClassificationTreePerfMeas14[4,"RB_TN"] = sum(CheckListRBRO$RB_TN)
ClassificationTreePerfMeas14[4,"RB_FP"] = sum(CheckListRBRO$RB_FP)
ClassificationTreePerfMeas14[4,"RB_FN"] = sum(CheckListRBRO$RB_FN)
# Rose Both model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherRO, DtestTogetherNS)))
CheckListTogetherRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"Together_TP"] = sum(CheckListTogetherRO$Together_TP)
ClassificationTreePerfMeas14[4,"Together_TN"] = sum(CheckListTogetherRO$Together_TN)
ClassificationTreePerfMeas14[4,"Together_FP"] = sum(CheckListTogetherRO$Together_FP)
ClassificationTreePerfMeas14[4,"Together_FN"] = sum(CheckListTogetherRO$Together_FN)
# Smote 2014-----------------
# Smote model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBSM, DtestQBNS)))
CheckListQBSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"QB_TP"] = sum(CheckListQBSM$QB_TP)
ClassificationTreePerfMeas14[5,"QB_TN"] = sum(CheckListQBSM$QB_TN)
ClassificationTreePerfMeas14[5,"QB_FP"] = sum(CheckListQBSM$QB_FP)
ClassificationTreePerfMeas14[5,"QB_FN"] = sum(CheckListQBSM$QB_FN)
# Smote model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRSM, DtestWRNS)))
CheckListWRSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"WR_TP"] = sum(CheckListWRSM$WR_TP)
ClassificationTreePerfMeas14[5,"WR_TN"] = sum(CheckListWRSM$WR_TN)
ClassificationTreePerfMeas14[5,"WR_FP"] = sum(CheckListWRSM$WR_FP)
ClassificationTreePerfMeas14[5,"WR_FN"] = sum(CheckListWRSM$WR_FN)
# Smote model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBSM, DtestRBNS)))
CheckListRBSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"RB_TP"] = sum(CheckListRBSM$RB_TP)
ClassificationTreePerfMeas14[5,"RB_TN"] = sum(CheckListRBSM$RB_TN)
ClassificationTreePerfMeas14[5,"RB_FP"] = sum(CheckListRBSM$RB_FP)
ClassificationTreePerfMeas14[5,"RB_FN"] = sum(CheckListRBSM$RB_FN)
# Smote model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherSM, DtestTogetherNS)))
CheckListTogetherSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"Together_TP"] = sum(CheckListTogetherSM$Together_TP)
ClassificationTreePerfMeas14[5,"Together_TN"] = sum(CheckListTogetherSM$Together_TN)
ClassificationTreePerfMeas14[5,"Together_FP"] = sum(CheckListTogetherSM$Together_FP)
ClassificationTreePerfMeas14[5,"Together_FN"] = sum(CheckListTogetherSM$Together_FN)
save(ClassificationTreePerfMeas14, file = "../Data/PerformanceMeasurement/ClassificationTreePerfMeas14.Rdata")
<file_sep>/README.md
# DSF_NFLDraftPrediction
Predicting the likelihood that a college football player will be drafted in the NFL Draft.
<file_sep>/Project_Scripts/PerformanceMeasurement.R
# This script's goal is to aggregate the results of all the different models to compare them
# Load all the results of the different models
load("../Data/PerformanceMeasurement/ClassificationTreePerfMeas.Rdata")
load("../Data/PerformanceMeasurement/KNNPerfMeas.Rdata")
load("../Data/PerformanceMeasurement/NaiveBayesPerfMeas.Rdata")
load("../Data/PerformanceMeasurement/randomForestPerfMeas.Rdata")
load("../Data/PerformanceMeasurement/ANNPerfMeas.Rdata")
load("../Data/PerformanceMeasurement/LogisticRegressionPerfMeas.Rdata")
# Bring all the pieces together
PerfMeas07to13 = as.data.frame(rbind(ClassificationTreePerfMeas,KNNPerfMeas,NaiveBayesPerfMeas,randomForestPerfMeas, ANNPerfMeas, LogisticRegressionPerfMeas))
# Check whether each column always contains the same value (which would mean that all methods
# used the unsampled data set and filtered by the correct position for checking).
CheckTibble = data.frame(Method = PerfMeas07to13$Method, Sampling = PerfMeas07to13$Sampling, QB = NA, WR = NA, RB = NA, Together = NA, stringsAsFactors = FALSE)
for(i in 1:nrow(PerfMeas07to13)){
CheckTibble$QB[i] = PerfMeas07to13$QB_TP[i] + PerfMeas07to13$QB_TN[i] + PerfMeas07to13$QB_FN[i] + PerfMeas07to13$QB_FP[i]
CheckTibble$WR[i] = PerfMeas07to13$WR_TP[i] + PerfMeas07to13$WR_TN[i] + PerfMeas07to13$WR_FN[i] + PerfMeas07to13$WR_FP[i]
CheckTibble$RB[i] = PerfMeas07to13$RB_TP[i] + PerfMeas07to13$RB_TN[i] + PerfMeas07to13$RB_FN[i] + PerfMeas07to13$RB_FP[i]
CheckTibble$Together[i] = PerfMeas07to13$Together_TP[i] + PerfMeas07to13$Together_TN[i] + PerfMeas07to13$Together_FN[i] + PerfMeas07to13$Together_FP[i]
}
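# Sketch of an explicit check (not run in the original pipeline): if every method was evaluated on
# the same unsampled test set, each position column should contain exactly one unique value, e.g.
# sapply(CheckTibble[, c("QB", "WR", "RB", "Together")], function(col) length(unique(col)) == 1)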
# Create an empty tibble
PerfMeasTibble = data.frame(Method = PerfMeas07to13$Method, Sampling = PerfMeas07to13$Sampling, QB_Acc = NA, QB_Pre=NA, QB_Rec= NA, QB_F1=NA, WR_Acc = NA, WR_Pre=NA, WR_Rec=NA, WR_F1=NA, RB_Acc = NA, RB_Pre=NA, RB_Rec=NA, RB_F1=NA, Together_Acc = NA, Together_Pre=NA, Together_Rec=NA, Together_F1=NA, stringsAsFactors = FALSE)
# Compute the Accuracy (based on the number of correct classifications)
for(i in 1:nrow(PerfMeas07to13)){
PerfMeasTibble$QB_Acc[i] = (PerfMeas07to13$QB_TP[i] + PerfMeas07to13$QB_TN[i])/(PerfMeas07to13$QB_TP[i] + PerfMeas07to13$QB_TN[i] + PerfMeas07to13$QB_FN[i] + PerfMeas07to13$QB_FP[i])
PerfMeasTibble$WR_Acc[i] = (PerfMeas07to13$WR_TP[i] + PerfMeas07to13$WR_TN[i])/(PerfMeas07to13$WR_TP[i] + PerfMeas07to13$WR_TN[i] + PerfMeas07to13$WR_FN[i] + PerfMeas07to13$WR_FP[i])
PerfMeasTibble$RB_Acc[i] = (PerfMeas07to13$RB_TP[i] + PerfMeas07to13$RB_TN[i])/(PerfMeas07to13$RB_TP[i] + PerfMeas07to13$RB_TN[i] + PerfMeas07to13$RB_FN[i] + PerfMeas07to13$RB_FP[i])
PerfMeasTibble$Together_Acc[i] = (PerfMeas07to13$Together_TP[i] + PerfMeas07to13$Together_TN[i])/(PerfMeas07to13$Together_TP[i] + PerfMeas07to13$Together_TN[i] + PerfMeas07to13$Together_FN[i] + PerfMeas07to13$Together_FP[i])
}
# Compute the Precision TP/(TP+FP)
for(i in 1:nrow(PerfMeas07to13)){
PerfMeasTibble$QB_Pre[i] = (PerfMeas07to13$QB_TP[i])/(PerfMeas07to13$QB_TP[i] + PerfMeas07to13$QB_FP[i])
PerfMeasTibble$WR_Pre[i] = (PerfMeas07to13$WR_TP[i])/(PerfMeas07to13$WR_TP[i] + PerfMeas07to13$WR_FP[i])
PerfMeasTibble$RB_Pre[i] = (PerfMeas07to13$RB_TP[i])/(PerfMeas07to13$RB_TP[i] + PerfMeas07to13$RB_FP[i])
PerfMeasTibble$Together_Pre[i] = (PerfMeas07to13$Together_TP[i])/(PerfMeas07to13$Together_TP[i] + PerfMeas07to13$Together_FP[i])
}
# Compute the Recall TP/(TP+FN)
for(i in 1:nrow(PerfMeas07to13)){
PerfMeasTibble$QB_Rec[i] = (PerfMeas07to13$QB_TP[i])/(PerfMeas07to13$QB_TP[i] + PerfMeas07to13$QB_FN[i])
PerfMeasTibble$WR_Rec[i] = (PerfMeas07to13$WR_TP[i])/(PerfMeas07to13$WR_TP[i] + PerfMeas07to13$WR_FN[i])
PerfMeasTibble$RB_Rec[i] = (PerfMeas07to13$RB_TP[i])/(PerfMeas07to13$RB_TP[i] + PerfMeas07to13$RB_FN[i])
PerfMeasTibble$Together_Rec[i] = (PerfMeas07to13$Together_TP[i])/(PerfMeas07to13$Together_TP[i] + PerfMeas07to13$Together_FN[i])
}
# Compute the F1-Score = 2*(Precision*Recall/(Precision+Recall))
for(i in 1:nrow(PerfMeasTibble)){
PerfMeasTibble$QB_F1[i] = 2*(PerfMeasTibble$QB_Pre[i]*PerfMeasTibble$QB_Rec[i])/(PerfMeasTibble$QB_Pre[i] + PerfMeasTibble$QB_Rec[i])
PerfMeasTibble$WR_F1[i] = 2*(PerfMeasTibble$WR_Pre[i]*PerfMeasTibble$WR_Rec[i])/(PerfMeasTibble$WR_Pre[i] + PerfMeasTibble$WR_Rec[i])
PerfMeasTibble$RB_F1[i] = 2*(PerfMeasTibble$RB_Pre[i]*PerfMeasTibble$RB_Rec[i])/(PerfMeasTibble$RB_Pre[i] + PerfMeasTibble$RB_Rec[i])
PerfMeasTibble$Together_F1[i] = 2*(PerfMeasTibble$Together_Pre[i]*PerfMeasTibble$Together_Rec[i])/(PerfMeasTibble$Together_Pre[i] + PerfMeasTibble$Together_Rec[i])
}
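# Quick sanity check of the formulas with made-up counts (illustrative numbers only):
# TP = 8, FP = 2, TN = 85, FN = 5 gives
# Accuracy = (8+85)/100 = 0.93, Precision = 8/10 = 0.80, Recall = 8/13 ~ 0.615,
# F1 = 2*(0.80*0.615)/(0.80+0.615) ~ 0.696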
# Look for the best Combinations
# Create an empty dataframe
ResultTibble = data.frame(Position = c("QB", "WR", "RB", "Together"), Method = NA, Sampling = NA, F1 = NA, Accuracy = NA)
# Fill the Methods
ResultTibble$Method[1] = PerfMeasTibble$Method[which.max(PerfMeasTibble$QB_F1)]
ResultTibble$Method[2] = PerfMeasTibble$Method[which.max(PerfMeasTibble$WR_F1)]
ResultTibble$Method[3] = PerfMeasTibble$Method[which.max(PerfMeasTibble$RB_F1)]
ResultTibble$Method[4] = PerfMeasTibble$Method[which.max(PerfMeasTibble$Together_F1)]
# Fill the Sampling methods
ResultTibble$Sampling[1] = PerfMeasTibble$Sampling[which.max(PerfMeasTibble$QB_F1)]
ResultTibble$Sampling[2] = PerfMeasTibble$Sampling[which.max(PerfMeasTibble$WR_F1)]
ResultTibble$Sampling[3] = PerfMeasTibble$Sampling[which.max(PerfMeasTibble$RB_F1)]
ResultTibble$Sampling[4] = PerfMeasTibble$Sampling[which.max(PerfMeasTibble$Together_F1)]
# Fill the value of F1-Score
ResultTibble$F1[1] = PerfMeasTibble$QB_F1[which.max(PerfMeasTibble$QB_F1)]
ResultTibble$F1[2] = PerfMeasTibble$WR_F1[which.max(PerfMeasTibble$WR_F1)]
ResultTibble$F1[3] = PerfMeasTibble$RB_F1[which.max(PerfMeasTibble$RB_F1)]
ResultTibble$F1[4] = PerfMeasTibble$Together_F1[which.max(PerfMeasTibble$Together_F1)]
# Fill the value of Accuracy
ResultTibble$Accuracy[1] = PerfMeasTibble$QB_Acc[which.max(PerfMeasTibble$QB_F1)]
ResultTibble$Accuracy[2] = PerfMeasTibble$WR_Acc[which.max(PerfMeasTibble$WR_F1)]
ResultTibble$Accuracy[3] = PerfMeasTibble$RB_Acc[which.max(PerfMeasTibble$RB_F1)]
ResultTibble$Accuracy[4] = PerfMeasTibble$Together_Acc[which.max(PerfMeasTibble$Together_F1)]
save(ResultTibble, file="../Data/PerformanceMeasurement/BestModels.Rdata")
save(PerfMeasTibble, file="../Data/PerformanceMeasurement/PerfMeasAllModels.Rdata")
# Checking with the 2014 data--------------
load("../Data/PerformanceMeasurement/randomForestPerfMeas2014.Rdata")
load("../Data/PerformanceMeasurement/ClassificationTreePerfMeas14.Rdata")
load("../Data/PerformanceMeasurement/ANNPerfMeas2014.Rdata")
load("../Data/PerformanceMeasurement/NaiveBayesPerfMeasTest.Rdata")
load("../Data/PerformanceMeasurement/KNNPerfMeasTest.Rdata")
load("../Data/PerformanceMeasurement/LogisticRegressionPerfMeas2014.Rdata")
PerfMeas14 = as.data.frame(rbind(ClassificationTreePerfMeas14, randomForestPerfMeas2014, ANNPerfMeas2014, KNNPerfMeasTest, NaiveBayesPerfMeasTest, LogisticRegressionPerfMeas2014))
CheckTibble14 = data.frame(Method = PerfMeas14$Method, Sampling = PerfMeas14$Sampling, QB = NA, WR = NA, RB = NA, Together = NA, stringsAsFactors = FALSE)
for(i in 1:nrow(PerfMeas14)){
CheckTibble14$QB[i] = PerfMeas14$QB_TP[i] + PerfMeas14$QB_TN[i] + PerfMeas14$QB_FN[i] + PerfMeas14$QB_FP[i]
CheckTibble14$WR[i] = PerfMeas14$WR_TP[i] + PerfMeas14$WR_TN[i] + PerfMeas14$WR_FN[i] + PerfMeas14$WR_FP[i]
CheckTibble14$RB[i] = PerfMeas14$RB_TP[i] + PerfMeas14$RB_TN[i] + PerfMeas14$RB_FN[i] + PerfMeas14$RB_FP[i]
CheckTibble14$Together[i] = PerfMeas14$Together_TP[i] + PerfMeas14$Together_TN[i] + PerfMeas14$Together_FN[i] + PerfMeas14$Together_FP[i]
}
# Create an empty tibble
PerfMeasTibble14 = data.frame(Method = PerfMeas14$Method, Sampling = PerfMeas14$Sampling, QB_Acc = NA, QB_Pre=NA, QB_Rec= NA, QB_F1=NA, WR_Acc = NA, WR_Pre=NA, WR_Rec=NA, WR_F1=NA, RB_Acc = NA, RB_Pre=NA, RB_Rec=NA, RB_F1=NA, Together_Acc = NA, Together_Pre=NA, Together_Rec=NA, Together_F1=NA, stringsAsFactors = FALSE)
# Compute the Accuracy (based on the number of correct classifications)
for(i in 1:nrow(PerfMeas14)){
PerfMeasTibble14$QB_Acc[i] = (PerfMeas14$QB_TP[i] + PerfMeas14$QB_TN[i])/(PerfMeas14$QB_TP[i] + PerfMeas14$QB_TN[i] + PerfMeas14$QB_FN[i] + PerfMeas14$QB_FP[i])
PerfMeasTibble14$WR_Acc[i] = (PerfMeas14$WR_TP[i] + PerfMeas14$WR_TN[i])/(PerfMeas14$WR_TP[i] + PerfMeas14$WR_TN[i] + PerfMeas14$WR_FN[i] + PerfMeas14$WR_FP[i])
PerfMeasTibble14$RB_Acc[i] = (PerfMeas14$RB_TP[i] + PerfMeas14$RB_TN[i])/(PerfMeas14$RB_TP[i] + PerfMeas14$RB_TN[i] + PerfMeas14$RB_FN[i] + PerfMeas14$RB_FP[i])
PerfMeasTibble14$Together_Acc[i] = (PerfMeas14$Together_TP[i] + PerfMeas14$Together_TN[i])/(PerfMeas14$Together_TP[i] + PerfMeas14$Together_TN[i] + PerfMeas14$Together_FN[i] + PerfMeas14$Together_FP[i])
}
# Compute the Precision TP/(TP+FP)
for(i in 1:nrow(PerfMeas14)){
PerfMeasTibble14$QB_Pre[i] = (PerfMeas14$QB_TP[i])/(PerfMeas14$QB_TP[i] + PerfMeas14$QB_FP[i])
PerfMeasTibble14$WR_Pre[i] = (PerfMeas14$WR_TP[i])/(PerfMeas14$WR_TP[i] + PerfMeas14$WR_FP[i])
PerfMeasTibble14$RB_Pre[i] = (PerfMeas14$RB_TP[i])/(PerfMeas14$RB_TP[i] + PerfMeas14$RB_FP[i])
PerfMeasTibble14$Together_Pre[i] = (PerfMeas14$Together_TP[i])/(PerfMeas14$Together_TP[i] + PerfMeas14$Together_FP[i])
}
# Compute the Recall TP/(TP+FN)
for(i in 1:nrow(PerfMeas14)){
PerfMeasTibble14$QB_Rec[i] = (PerfMeas14$QB_TP[i])/(PerfMeas14$QB_TP[i] + PerfMeas14$QB_FN[i])
PerfMeasTibble14$WR_Rec[i] = (PerfMeas14$WR_TP[i])/(PerfMeas14$WR_TP[i] + PerfMeas14$WR_FN[i])
PerfMeasTibble14$RB_Rec[i] = (PerfMeas14$RB_TP[i])/(PerfMeas14$RB_TP[i] + PerfMeas14$RB_FN[i])
PerfMeasTibble14$Together_Rec[i] = (PerfMeas14$Together_TP[i])/(PerfMeas14$Together_TP[i] + PerfMeas14$Together_FN[i])
}
# Compute the F1-Score = 2*(Precision*Recall/(Precision+Recall))
for(i in 1:nrow(PerfMeasTibble14)){
PerfMeasTibble14$QB_F1[i] = 2*(PerfMeasTibble14$QB_Pre[i]*PerfMeasTibble14$QB_Rec[i])/(PerfMeasTibble14$QB_Pre[i] + PerfMeasTibble14$QB_Rec[i])
PerfMeasTibble14$WR_F1[i] = 2*(PerfMeasTibble14$WR_Pre[i]*PerfMeasTibble14$WR_Rec[i])/(PerfMeasTibble14$WR_Pre[i] + PerfMeasTibble14$WR_Rec[i])
PerfMeasTibble14$RB_F1[i] = 2*(PerfMeasTibble14$RB_Pre[i]*PerfMeasTibble14$RB_Rec[i])/(PerfMeasTibble14$RB_Pre[i] + PerfMeasTibble14$RB_Rec[i])
PerfMeasTibble14$Together_F1[i] = 2*(PerfMeasTibble14$Together_Pre[i]*PerfMeasTibble14$Together_Rec[i])/(PerfMeasTibble14$Together_Pre[i] + PerfMeasTibble14$Together_Rec[i])
}
# Look for the best Combinations
# Create an empty dataframe
ResultTibble14 = data.frame(Position = c("QB", "WR", "RB", "Together"), Method = NA, Sampling = NA, F1 = NA, Accuracy = NA)
# Fill the Methods
ResultTibble14$Method[1] = PerfMeasTibble14$Method[which.max(PerfMeasTibble14$QB_F1)]
ResultTibble14$Method[2] = PerfMeasTibble14$Method[which.max(PerfMeasTibble14$WR_F1)]
ResultTibble14$Method[3] = PerfMeasTibble14$Method[which.max(PerfMeasTibble14$RB_F1)]
ResultTibble14$Method[4] = PerfMeasTibble14$Method[which.max(PerfMeasTibble14$Together_F1)]
# Fill the Sampling methods
ResultTibble14$Sampling[1] = PerfMeasTibble14$Sampling[which.max(PerfMeasTibble14$QB_F1)]
ResultTibble14$Sampling[2] = PerfMeasTibble14$Sampling[which.max(PerfMeasTibble14$WR_F1)]
ResultTibble14$Sampling[3] = PerfMeasTibble14$Sampling[which.max(PerfMeasTibble14$RB_F1)]
ResultTibble14$Sampling[4] = PerfMeasTibble14$Sampling[which.max(PerfMeasTibble14$Together_F1)]
# Fill the value of F1-Score
ResultTibble14$F1[1] = PerfMeasTibble14$QB_F1[which.max(PerfMeasTibble14$QB_F1)]
ResultTibble14$F1[2] = PerfMeasTibble14$WR_F1[which.max(PerfMeasTibble14$WR_F1)]
ResultTibble14$F1[3] = PerfMeasTibble14$RB_F1[which.max(PerfMeasTibble14$RB_F1)]
ResultTibble14$F1[4] = PerfMeasTibble14$Together_F1[which.max(PerfMeasTibble14$Together_F1)]
# Fill the value of Accuracy
ResultTibble14$Accuracy[1] = PerfMeasTibble14$QB_Acc[which.max(PerfMeasTibble14$QB_F1)]
ResultTibble14$Accuracy[2] = PerfMeasTibble14$WR_Acc[which.max(PerfMeasTibble14$WR_F1)]
ResultTibble14$Accuracy[3] = PerfMeasTibble14$RB_Acc[which.max(PerfMeasTibble14$RB_F1)]
ResultTibble14$Accuracy[4] = PerfMeasTibble14$Together_Acc[which.max(PerfMeasTibble14$Together_F1)]
save(ResultTibble14, file="../Data/PerformanceMeasurement/BestModels14.Rdata")
save(PerfMeasTibble14, file="../Data/PerformanceMeasurement/PerfMeasAllModels14.Rdata")
<file_sep>/Project_Scripts/RandomForest.R
library(tidyverse)
library(randomForest)
library(ROCR)
# init ----
# Uncomment one of the lines to use the respective sampling
load("../Data/CleanData/CleanClass2007to2014_3.RData") # no sampling, don't comment this line out!
# load("../Data/CleanData/CleanClass2007to2013_3_oversampling.RData") # oversampling
# load("../Data/CleanData/CleanClass2007to2013_3_undersampling.RData") # undersampling
# load("../Data/CleanData/CleanClass2007to2013_3_Rose.both.RData") # ROSE both
# load("../Data/CleanData/CleanClass2007to2013_3_smote.RData") # SMOTE
# Uncomment one of the lines to use the respective sampling
cleanData <- as_tibble(CleanClass2007to2014_3) # no sampling, don't comment this line out!
# cleanData_s <- as_tibble(CleanClass2007to2014_3_oversampling) # oversampling
# cleanData_s <- as_tibble(CleanClass2007to2014_3_undersampling) # undersampling
# cleanData_s <- as_tibble(CleanClass2007to2014_3_Rose.both) # ROSE both
# cleanData_s <- as_tibble(cleanData_smote) # SMOTE
# Define performance measurement function
perfFun <- function(TP, FP, TN, FN){
accuracy <- (TP + TN)/(TP + FP + TN + FN)
precision <- TP/(TP + FP)
recall <- TP/(TP + FN)
F1 <- 2*((precision*recall)/(precision + recall))
out <- data.frame("Accuracy" = accuracy, "Precision" = precision, "Recall" = recall, "F1" = F1)
return(out)
}
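# Example call with illustrative counts (not taken from the data):
# perfFun(TP = 10, FP = 3, TN = 80, FN = 7)
# should return Accuracy = 0.90, Precision ~ 0.77, Recall ~ 0.59 and F1 ~ 0.67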
# Prepare for between-model comparison of the training fit
# !!! ONLY RUN THIS ONCE !!!
randomForestPerfMeas = data.frame(Method = character(), Sampling = character(),
QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(),
stringsAsFactors = FALSE)
randomForestPerfMeas[1, 2] = "no_sampling"
randomForestPerfMeas[2, 2] = "oversampling"
randomForestPerfMeas[3, 2] = "undersampling"
randomForestPerfMeas[4, 2] = "Rose_both"
randomForestPerfMeas[5, 2] = "Smote"
randomForestPerfMeas$Method = "randomForest"
# Prepare for between-model comparison with 2014 as testing data
# !!! ONLY RUN THIS ONCE !!!
randomForestPerfMeas2014 = data.frame(Method = character(), Sampling = character(),
QB_TP = integer(), QB_TN = integer(), QB_FP = integer(), QB_FN = integer(),
WR_TP = integer(), WR_TN = integer(), WR_FP = integer(), WR_FN = integer(),
RB_TP = integer(), RB_TN = integer(), RB_FP = integer(), RB_FN = integer(),
Together_TP = integer(), Together_TN = integer(), Together_FP = integer(), Together_FN = integer(),
stringsAsFactors = FALSE)
randomForestPerfMeas2014[1, 2] = "no_sampling"
randomForestPerfMeas2014[2, 2] = "oversampling"
randomForestPerfMeas2014[3, 2] = "undersampling"
randomForestPerfMeas2014[4, 2] = "Rose_both"
randomForestPerfMeas2014[5, 2] = "Smote"
randomForestPerfMeas2014$Method = "randomForest"
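# Optional guard (sketch, not part of the original workflow): wrapping the two initialisations above
# in an exists() check would keep already collected results from being overwritten when the script
# is partially re-sourced, e.g.
# if (!exists("randomForestPerfMeas")) { randomForestPerfMeas = data.frame(...) }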
# Random Forest for QBs ----
# Select years 2007 through 2013 as training data
cleanData_QB <- cleanData %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_QB <- cleanData_s %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
x <- cleanData_QB %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- cleanData_QB %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted)
# Randomly shuffle the data for cross validation
set.seed(6969)
x <- x[sample(nrow(x)),]
# Create 10 folds
folds <- cut(seq(1,nrow(x)),breaks=10,labels=FALSE)
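# Note: cut(seq(1, nrow(x)), breaks = 10, labels = FALSE) assigns each row an integer fold id from
# 1 to 10 in contiguous blocks along the row order, giving roughly equal-sized folds; this is why
# the rows are shuffled beforehand.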
# Create placeholders for confusion matrices and variable importance
confusion_QB <- as.data.frame(matrix(NA, 6, 10))
importance_QB <- as.data.frame(matrix(NA, 24, 10))
confusion_QB_cv <- as.data.frame(matrix(NA, 6, 10))
importance_QB_cv <- as.data.frame(matrix(NA, 24, 10))
row.names(confusion_QB_cv) <- c("TN", "FP", "FN", "TP", "class error 0", "class error 1")
# Perform 10 fold cross validation using different numbers of trees in the model
for (j in 1:10){
for(i in 1:10){
# Segment data by fold
testIndexes <- which(folds==i,arr.ind=TRUE)
testData <- x[testIndexes, ]
trainData <- x[-testIndexes, ]
# Fit the Random Forest classifier on the training partition of this fold; the model's
# out-of-bag confusion matrix is recorded as the validation estimate
RF_QB <- randomForest(y ~ ., data = trainData, ntree = j*100)
# Save confusion matrix for j*100 trees
confusion_QB[, i] <- as.vector(RF_QB$confusion)
# Save variable importance for j*100 trees
importance_QB[, i] <- as.vector(RF_QB$importance)
}
# Summarize cross validated confusion and variable importance for j*100 trees
confusion_QB_cv[, j] <- rowMeans(confusion_QB)
colnames(confusion_QB_cv)[j] <- as.character(j*100)
importance_QB_cv[, j] <- rowMeans(importance_QB)
colnames(importance_QB_cv)[j] <- as.character(j*100)
}
row.names(importance_QB_cv) <- names(RF_QB$forest$xlevels)
# Measure the performance of the models
perf <- perfFun(confusion_QB_cv["TP", 1], confusion_QB_cv["FP", 1], confusion_QB_cv["TN", 1], confusion_QB_cv["FN", 1])
for(i in 2:ncol(confusion_QB_cv)){
perf <- rbind(perf, perfFun(confusion_QB_cv["TP", i], confusion_QB_cv["FP", i], confusion_QB_cv["TN", i], confusion_QB_cv["FN", i]))
}
row.names(perf) <- as.character(seq(100, 1000, by = 100))
# Select the number of trees that yielded the highest recall (this performance measure is chosen
# arbitrarily out of the four calculated because it suits the purpose of the analysis; other options
# or a combination of different measures should be explored in the future)
ntrees <- 100*which.max(perf$Recall)
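# One possible alternative (sketch): select the tree count by F1 instead of recall to balance
# precision and recall, e.g.
# ntrees = 100*which.max(perf$F1)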
# Run the model with the best performing number of trees
RF_QB <- randomForest(y ~ ., data = x, ntree = ntrees)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "QB") %>% drop_na(.)
x_uns <- D_uns %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_QB, x_uns)))
train_QB <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = pred)
# Use year 2014 for testing
cleanData_QB_test <- cleanData %>% filter(., Year == 2014, Position == "QB")
x_test <- cleanData_QB_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_QB, x_test)))
# Combine predictions and player data
pred_QB <- tibble("Code" = cleanData_QB_test$Player.Code, "Name" = cleanData_QB_test$Name, "Drafted" = cleanData_QB_test$Drafted, "pred" = pred)
# Plot variable importance for the best cross validated model
varImpPlot(RF_QB, main = "Variable Importance QB")
# Plot the ROC curve for the best cross validated model
pred <- predict(RF_QB, x_test, type = "prob")
pred <- prediction(pred[,2], cleanData_QB_test$Drafted)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, colorize = TRUE, main = "ROC Curve QB")
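# Optionally, the area under the ROC curve could be extracted as a single summary number
# (sketch, reusing the same ROCR prediction object):
# aucQB = performance(pred, measure = "auc")@y.values[[1]]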
# Random Forest for RBs ----
# Select years 2007 through 2013 as training data
cleanData_RB <- cleanData %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_RB <- cleanData_s %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
x <- cleanData_RB %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- cleanData_RB %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted)
# Randomly shuffle the data for cross validation
set.seed(6969)
x <- x[sample(nrow(x)),]
# Create 10 folds
folds <- cut(seq(1,nrow(x)),breaks=10,labels=FALSE)
# Create placeholders for confusion matrices and variable importance
confusion_RB <- as.data.frame(matrix(NA, 6, 10))
importance_RB <- as.data.frame(matrix(NA, 24, 10))
confusion_RB_cv <- as.data.frame(matrix(NA, 6, 10))
importance_RB_cv <- as.data.frame(matrix(NA, 24, 10))
row.names(confusion_RB_cv) <- c("TN", "FP", "FN", "TP", "class error 0", "class error 1")
# Perform 10 fold cross validation using different numbers of trees in the model
for (j in 1:10){
for(i in 1:10){
# Segment data by fold
testIndexes <- which(folds==i,arr.ind=TRUE)
testData <- x[testIndexes, ]
trainData <- x[-testIndexes, ]
# Fit the Random Forest classifier on the training partition of this fold; the model's
# out-of-bag confusion matrix is recorded as the validation estimate
RF_RB <- randomForest(y ~ ., data = trainData, ntree = j*100)
# Save confusion matrix for j*100 trees
confusion_RB[, i] <- as.vector(RF_RB$confusion)
# Save variable importance for j*100 trees
importance_RB[, i] <- as.vector(RF_RB$importance)
}
# Summarize cross validated confusion and variable importance for j*100 trees
confusion_RB_cv[, j] <- rowMeans(confusion_RB)
colnames(confusion_RB_cv)[j] <- as.character(j*100)
importance_RB_cv[, j] <- rowMeans(importance_RB)
colnames(importance_RB_cv)[j] <- as.character(j*100)
}
row.names(importance_RB_cv) <- names(RF_RB$forest$xlevels)
# Measure the performance of the models
perf <- perfFun(confusion_RB_cv["TP", 1], confusion_RB_cv["FP", 1], confusion_RB_cv["TN", 1], confusion_RB_cv["FN", 1])
for(i in 2:ncol(confusion_RB_cv)){
perf <- rbind(perf, perfFun(confusion_RB_cv["TP", i], confusion_RB_cv["FP", i], confusion_RB_cv["TN", i], confusion_RB_cv["FN", i]))
}
row.names(perf) <- as.character(seq(100, 1000, by = 100))
# Select the number of trees that yielded the highest recall (this performance measure is chosen
# arbitrarily out of the four calculated because it suits the purpose of the analysis; other options
# or a combination of different measures should be explored in the future)
ntrees <- 100*which.max(perf$Recall)
# Run the model with the best performing number of trees
RF_RB <- randomForest(y ~ ., data = x, ntree = ntrees)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "RB") %>% drop_na(.)
x_uns <- D_uns %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_RB, x_uns)))
train_RB <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = pred)
# Use year 2014 for testing
cleanData_RB_test <- cleanData %>% filter(., Year == 2014, Position == "RB")
x_test <- cleanData_RB_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_RB, x_test)))
# Combine predictions and player data
pred_RB <- tibble("Code" = cleanData_RB_test$Player.Code, "Name" = cleanData_RB_test$Name, "Drafted" = cleanData_RB_test$Drafted, "pred" = pred)
# Plot variable importance for the best cross validated model
varImpPlot(RF_RB, main = "Variable Importance RB")
# Plot the ROC curve for the best cross validated model
pred <- predict(RF_RB, x_test, type = "prob")
pred <- prediction(pred[,2], cleanData_RB_test$Drafted)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, colorize = TRUE, main = "ROC Curve RB")
# Random Forest for WRs ----
# Select years 2007 through 2013 as training data
cleanData_WR <- cleanData %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_WR <- cleanData_s %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
x <- cleanData_WR %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- cleanData_WR %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted)
# Randomly shuffle the data for cross validation
set.seed(6969)
x <- x[sample(nrow(x)),]
# Create 10 folds
folds <- cut(seq(1,nrow(x)),breaks=10,labels=FALSE)
# Create placeholders for confusion matrices and variable importance
confusion_WR <- as.data.frame(matrix(NA, 6, 10))
importance_WR <- as.data.frame(matrix(NA, 24, 10))
confusion_WR_cv <- as.data.frame(matrix(NA, 6, 10))
importance_WR_cv <- as.data.frame(matrix(NA, 24, 10))
row.names(confusion_WR_cv) <- c("TN", "FP", "FN", "TP", "class error 0", "class error 1")
# Perform 10 fold cross validation using different numbers of trees in the model
for (j in 1:10){
for(i in 1:10){
# Segment data by fold
testIndexes <- which(folds==i,arr.ind=TRUE)
testData <- x[testIndexes, ]
trainData <- x[-testIndexes, ]
# Fit the Random Forest classifier on the training partition of this fold; the model's
# out-of-bag confusion matrix is recorded as the validation estimate
RF_WR <- randomForest(y ~ ., data = trainData, ntree = j*100)
# Save confusion matrix for j*100 trees
confusion_WR[, i] <- as.vector(RF_WR$confusion)
# Save variable importance for j*100 trees
importance_WR[, i] <- as.vector(RF_WR$importance)
}
# Summarize cross validated confusion and variable importance for j*100 trees
confusion_WR_cv[, j] <- rowMeans(confusion_WR)
colnames(confusion_WR_cv)[j] <- as.character(j*100)
importance_WR_cv[, j] <- rowMeans(importance_WR)
colnames(importance_WR_cv)[j] <- as.character(j*100)
}
row.names(importance_WR_cv) <- names(RF_WR$forest$xlevels)
# Measure the performance of the models
perf <- perfFun(confusion_WR_cv["TP", 1], confusion_WR_cv["FP", 1], confusion_WR_cv["TN", 1], confusion_WR_cv["FN", 1])
for(i in 2:ncol(confusion_WR_cv)){
perf <- rbind(perf, perfFun(confusion_WR_cv["TP", i], confusion_WR_cv["FP", i], confusion_WR_cv["TN", i], confusion_WR_cv["FN", i]))
}
row.names(perf) <- as.character(seq(100, 1000, by = 100))
# Select the number of trees that yielded the highest recall (this performance measure is chosen
# arbitrarily out of the four calculated because it suits the purpose of the analysis; other options
# or a combination of different measures should be explored in the future)
ntrees <- 100*which.max(perf$Recall)
# Run the model with the best performing number of trees
RF_WR <- randomForest(y ~ ., data = x, ntree = ntrees)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014, Position == "WR") %>% drop_na(.)
x_uns <- D_uns %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_WR, x_uns)))
train_WR <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = pred)
# Use year 2014 for testing
cleanData_WR_test <- cleanData %>% filter(., Year == 2014, Position == "WR")
x_test <- cleanData_WR_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_WR, x_test)))
# Combine predictions and player data
pred_WR <- tibble("Code" = cleanData_WR_test$Player.Code, "Name" = cleanData_WR_test$Name, "Drafted" = cleanData_WR_test$Drafted, "pred" = pred)
# Plot variable importance for the best cross validated model
varImpPlot(RF_WR, main = "Variable Importance WR")
# Plot the ROC curve for the best cross validated model
pred <- predict(RF_WR, x_test, type = "prob")
pred <- prediction(pred[,2], cleanData_WR_test$Drafted)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, colorize = TRUE, main = "ROC Curve WR")
# Random Forest for all positions ----
# Select years 2007 through 2013 as training data
cleanData_all <- cleanData %>% filter(., Year < 2014) %>% drop_na(.)
# !!! Use the following line when working with ANY sampled data !!!
# cleanData_all <- cleanData_s %>% filter(., Year < 2014) %>% drop_na(.)
x <- cleanData_all %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
# !!! Use the following line ONLY when working with SMOTE sampled data and LEAVE COMMENTED otherwise !!!
# x <- cleanData_all %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Position, -Year, -Drafted)
# Randomly shuffle the data for cross validation
set.seed(6969)
x <- x[sample(nrow(x)),]
# Create 10 folds
folds <- cut(seq(1,nrow(x)),breaks=10,labels=FALSE)
# Create placeholders for confusion matrices and variable importance
confusion_all <- as.data.frame(matrix(NA, 6, 10))
importance_all <- as.data.frame(matrix(NA, 24, 10))
confusion_all_cv <- as.data.frame(matrix(NA, 6, 10))
importance_all_cv <- as.data.frame(matrix(NA, 24, 10))
row.names(confusion_all_cv) <- c("TN", "FP", "FN", "TP", "class error 0", "class error 1")
# Perform 10 fold cross validation using different numbers of trees in the model
for (j in 1:10){
for(i in 1:10){
# Segment data by fold
testIndexes <- which(folds==i,arr.ind=TRUE)
testData <- x[testIndexes, ]
trainData <- x[-testIndexes, ]
# Fit the Random Forest classifier on the training partition of this fold; the model's
# out-of-bag confusion matrix is recorded as the validation estimate
RF_all <- randomForest(y ~ ., data = trainData, ntree = j*100)
# Save confusion matrix for j*100 trees
confusion_all[, i] <- as.vector(RF_all$confusion)
# Save variable importance for j*100 trees
importance_all[, i] <- as.vector(RF_all$importance)
}
# Summarize cross validated confusion and variable importance for j*100 trees
confusion_all_cv[, j] <- rowMeans(confusion_all)
colnames(confusion_all_cv)[j] <- as.character(j*100)
importance_all_cv[, j] <- rowMeans(importance_all)
colnames(importance_all_cv)[j] <- as.character(j*100)
}
row.names(importance_all_cv) <- names(RF_all$forest$xlevels)
# Measure the performance of the models
perf <- perfFun(confusion_all_cv["TP", 1], confusion_all_cv["FP", 1], confusion_all_cv["TN", 1], confusion_all_cv["FN", 1])
for(i in 2:ncol(confusion_all_cv)){
perf <- rbind(perf, perfFun(confusion_all_cv["TP", i], confusion_all_cv["FP", i], confusion_all_cv["TN", i], confusion_all_cv["FN", i]))
}
row.names(perf) <- as.character(seq(100, 1000, by = 100))
# Select the number of trees that yielded the highest recall (this performance measure is chosen
# arbitrarily out of the four calculated because it suits the purpose of the analysis; other options
# or a combination of different measures should be explored in the future)
ntrees <- 100*which.max(perf$Recall)
# Run the model with the best performing number of trees
RF_all <- randomForest(y ~ ., data = x, ntree = ntrees)
# Exploring the training fit on unsampled data
D_uns <- cleanData %>% filter(., Year < 2014) %>% drop_na(.)
x_uns <- D_uns %>% mutate(., "y" = as.factor(Drafted)) %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_all, x_uns)))
train_all <- tibble("Code" = D_uns$Player.Code, "Name" = D_uns$Name, "Drafted" = D_uns$Drafted, "pred" = pred)
# Use year 2014 for testing
cleanData_all_test <- cleanData %>% filter(., Year == 2014)
x_test <- cleanData_all_test %>% select(., -Player.Code, -Name, -Class, -Position, -Year, -Drafted)
pred <- as.integer(as.vector(predict(RF_all, x_test)))
# Combine predictions and player data
pred_all <- tibble("Code" = cleanData_all_test$Player.Code, "Name" = cleanData_all_test$Name, "Drafted" = cleanData_all_test$Drafted, "pred" = pred)
# Plot variable importance for the best cross validated model
varImpPlot(RF_all, main = "Variable Importance All Positions")
# Plot the ROC curve for the best cross validated model
pred <- predict(RF_all, x_test, type = "prob")
pred <- prediction(pred[,2], cleanData_all_test$Drafted)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf, colorize = TRUE, main = "ROC Curve All Positions")
# Aggregate Results ----
resultsComb_separate <- tibble("Player.Code" = c(pred_QB$Code, pred_RB$Code, pred_WR$Code),
"Name" = c(pred_QB$Name, pred_RB$Name, pred_WR$Name),
"Pred" = ifelse(c(pred_QB$pred, pred_RB$pred, pred_WR$pred) >= 0.5, 1, 0),
"Drafted" = c(pred_QB$Drafted, pred_RB$Drafted, pred_WR$Drafted))
resultsComb_separate <- resultsComb_separate %>% mutate(., "error" = ifelse(Pred != Drafted, 1, 0),
"TP" = ifelse(Pred == Drafted & Drafted == 1, 1, 0),
"FP" = ifelse(Pred != Drafted & Drafted == 0, 1, 0),
"TN" = ifelse(Pred == Drafted & Drafted == 0, 1, 0),
"FN" = ifelse(Pred != Drafted & Drafted == 1, 1, 0))
resultsComb_all <- tibble("Player.Code" = pred_all$Code,
"Name" = pred_all$Name,
"Pred" = ifelse(pred_all$pred >= 0.5, 1, 0),
"Drafted" = pred_all$Drafted)
resultsComb_all <- resultsComb_all %>% mutate(., "error" = ifelse(Pred != Drafted, 1, 0),
"TP" = ifelse(Pred == Drafted & Drafted == 1, 1, 0),
"FP" = ifelse(Pred != Drafted & Drafted == 0, 1, 0),
"TN" = ifelse(Pred == Drafted & Drafted == 0, 1, 0),
"FN" = ifelse(Pred != Drafted & Drafted == 1, 1, 0))
# Fill in the training fit data
# !!! Note: row index has to be changed depending on the sampling used !!!
rowInd <- 1 # no sampling
# rowInd <- 2 # oversampling
# rowInd <- 3 # undersampling
# rowInd <- 4 # Rose_both
# rowInd <- 5 # SMOTE
randomForestPerfMeas[rowInd, "QB_TP"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) == train_QB$Drafted & train_QB$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "QB_TN"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) == train_QB$Drafted & train_QB$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "QB_FP"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) != train_QB$Drafted & train_QB$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "QB_FN"] = sum(ifelse(ifelse(train_QB$pred >= 0.5, 1, 0) != train_QB$Drafted & train_QB$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "RB_TP"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) == train_RB$Drafted & train_RB$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "RB_TN"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) == train_RB$Drafted & train_RB$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "RB_FP"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) != train_RB$Drafted & train_RB$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "RB_FN"] = sum(ifelse(ifelse(train_RB$pred >= 0.5, 1, 0) != train_RB$Drafted & train_RB$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "WR_TP"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) == train_WR$Drafted & train_WR$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "WR_TN"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) == train_WR$Drafted & train_WR$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "WR_FP"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) != train_WR$Drafted & train_WR$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "WR_FN"] = sum(ifelse(ifelse(train_WR$pred >= 0.5, 1, 0) != train_WR$Drafted & train_WR$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "Together_TP"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) == train_all$Drafted & train_all$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "Together_TN"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) == train_all$Drafted & train_all$Drafted == 0, 1, 0))
randomForestPerfMeas[rowInd, "Together_FP"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) != train_all$Drafted & train_all$Drafted == 1, 1, 0))
randomForestPerfMeas[rowInd, "Together_FN"] = sum(ifelse(ifelse(train_all$pred >= 0.5, 1, 0) != train_all$Drafted & train_all$Drafted == 0, 1, 0))
# Fill in the testing fit data
randomForestPerfMeas2014[rowInd, "QB_TP"] = sum(ifelse(pred_QB$pred == pred_QB$Drafted & pred_QB$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "QB_TN"] = sum(ifelse(pred_QB$pred == pred_QB$Drafted & pred_QB$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "QB_FP"] = sum(ifelse(pred_QB$pred != pred_QB$Drafted & pred_QB$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "QB_FN"] = sum(ifelse(pred_QB$pred != pred_QB$Drafted & pred_QB$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "RB_TP"] = sum(ifelse(pred_RB$pred == pred_RB$Drafted & pred_RB$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "RB_TN"] = sum(ifelse(pred_RB$pred == pred_RB$Drafted & pred_RB$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "RB_FP"] = sum(ifelse(pred_RB$pred != pred_RB$Drafted & pred_RB$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "RB_FN"] = sum(ifelse(pred_RB$pred != pred_RB$Drafted & pred_RB$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "WR_TP"] = sum(ifelse(pred_WR$pred == pred_WR$Drafted & pred_WR$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "WR_TN"] = sum(ifelse(pred_WR$pred == pred_WR$Drafted & pred_WR$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "WR_FP"] = sum(ifelse(pred_WR$pred != pred_WR$Drafted & pred_WR$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "WR_FN"] = sum(ifelse(pred_WR$pred != pred_WR$Drafted & pred_WR$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "Together_TP"] = sum(ifelse(pred_all$pred == pred_all$Drafted & pred_all$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "Together_TN"] = sum(ifelse(pred_all$pred == pred_all$Drafted & pred_all$pred == 0, 1, 0))
randomForestPerfMeas2014[rowInd, "Together_FP"] = sum(ifelse(pred_all$pred != pred_all$Drafted & pred_all$pred == 1, 1, 0))
randomForestPerfMeas2014[rowInd, "Together_FN"] = sum(ifelse(pred_all$pred != pred_all$Drafted & pred_all$pred == 0, 1, 0))
# Save the results for model comparison
save(randomForestPerfMeas2014, file = "../Data/PerformanceMeasurement/randomForestPerfMeas2014.Rdata")
save(randomForestPerfMeas, file = "../Data/PerformanceMeasurement/randomForestPerfMeas.Rdata")
<repo_name>HugoCastroBR/Instagram-clone-front<file_sep>/src/domain/errors/index.ts
export * from './internal-server-error'
export * from './invalid-credentials'
export * from './not-found-error'
export * from './unexpected-error'<file_sep>/src/data/usecases/authentication/remote-authentication.spec.ts
import { HttpPostClientSpy } from '@/data/test/mock-http-client'
import { RemoteAuthentication } from '@/data/usecases/authentication/remote-authentication'
import faker from 'faker'
import { mockAccountModel, mockAuthentication } from '@/domain/test/mock-account';
import { InvalidCredentialsError } from '@/domain/errors/invalid-credentials';
import { HttpStatusCode } from '@/data/protocols/http/http-response';
import { UnexpectedError } from '@/domain/errors/unexpected-error';
import { NotFoundError } from '@/domain/errors/not-found-error';
import { InternalServerError } from '@/domain/errors/internal-server-error';
import { AccountModel } from '@/domain/models/account-model';
import { AuthenticationParams } from '@/domain/usecases/authentication';
type SutTypes = {
sut: RemoteAuthentication
httpPostClientSpy: HttpPostClientSpy<AuthenticationParams,AccountModel>
}
const makeSut = (url = faker.internet.url()): SutTypes => {
const httpPostClientSpy = new HttpPostClientSpy<AuthenticationParams,AccountModel>()
const sut = new RemoteAuthentication(url, httpPostClientSpy)
return {
sut,
httpPostClientSpy
}
}
describe('RemoteAuthentication', () => {
test('Should call httpPostClient with correct URL', () => {
const url = faker.internet.url()
const { sut, httpPostClientSpy } = makeSut(url)
sut.auth(mockAuthentication())
expect(httpPostClientSpy.url).toBe(url)
})
test('Should call httpPostClient with correct Body', () => {
const { sut, httpPostClientSpy } = makeSut()
const authenticationParams = mockAuthentication()
sut.auth(authenticationParams)
expect(httpPostClientSpy.body).toEqual(authenticationParams)
})
test('Should throw InvalidCredentialsError if HttpPostClient returns 401', async () => {
const { sut, httpPostClientSpy } = makeSut()
httpPostClientSpy.response = {
statusCode: HttpStatusCode.unauthorized
}
const promise = sut.auth(mockAuthentication())
await expect(promise).rejects.toThrow(new InvalidCredentialsError())
})
test('Should throw UnexpectedError if HttpPostClient returns 400', async () => {
const { sut, httpPostClientSpy } = makeSut()
httpPostClientSpy.response = {
statusCode: HttpStatusCode.badRequest
}
const promise = sut.auth(mockAuthentication())
await expect(promise).rejects.toThrow(new UnexpectedError())
})
test('Should throw NotFoundError if HttpPostClient returns 404', async () => {
const { sut, httpPostClientSpy } = makeSut()
httpPostClientSpy.response = {
statusCode: HttpStatusCode.notFound
}
const promise = sut.auth(mockAuthentication())
await expect(promise).rejects.toThrow(new NotFoundError())
})
test('Should throw InternalServerError if HttpPostClient returns 500', async () => {
const { sut, httpPostClientSpy } = makeSut()
httpPostClientSpy.response = {
statusCode: HttpStatusCode.internalServerError
}
const promise = sut.auth(mockAuthentication())
await expect(promise).rejects.toThrow(new InternalServerError())
})
test('Should return an AccountModel if HttpPostClient returns 200', async () => {
const { sut, httpPostClientSpy } = makeSut()
const httpResult = mockAccountModel()
httpPostClientSpy.response = {
statusCode: HttpStatusCode.ok,
body: httpResult
}
const account = await sut.auth(mockAuthentication())
    expect(account).toEqual(httpResult)
})
})
|
213946cbb29af0d94e3c750c68f4a896f53df2f6
|
[
"TypeScript"
] | 2 |
TypeScript
|
HugoCastroBR/Instagram-clone-front
|
66013a09b050dc08e2306dcda9984a5022f9cc03
|
8b8f81e32128eb8fa4967d1b953c32caef63a86f
|
refs/heads/master
|
<file_sep># transparency Project Repository
Copyright : gmartinon
easy shap
# Getting Started
**[Generic Development Tutorials by Quantmetry](https://gitlab.com/quantmetry/qmtools/TemplateCookieCutter/tree/master/tutorials)**
## 0. Clone this repository
```
$ git clone <this project>
$ cd <this project>
```
## 1. Set up your virtual environment and activate it
Goal: create a local virtual environment in the folder `./.venv/`.
- First: check your python3 version:
```
$ python3 --version
# examples of outputs:
Python 3.6.2 :: Anaconda, Inc.
Python 3.7.2
$ which python3
/Users/benjamin/anaconda3/bin/python3
/usr/bin/python3
```
- If you don't have python3 and you are working on your mac: install it from [python.org](https://www.python.org/downloads/)
- If you don't have python3 and are working on an ubuntu-like system: install from package manager:
```
$ apt-get update
$ apt-get -y install python3 python3-pip python3-venv
```
- Now that python3 is installed create your environment and activate it:
```
$ make init
$ source activate.sh
```
You should **always** activate your environment when working on the project.
If it fails with one of the following messages:
```
"ERROR: failed to create the .venv : do it yourself!"
"ERROR: failed to activate virtual environment .venv! ask for advice on #dev "
```
instructions on how to create an environment by yourself can be found in the
[tutorials about virtual environments](https://gitlab.com/quantmetry/qmtools/TemplateCookieCutter/blob/master/tutorials/virtualenv.md)
## 2. Install the project's requirements
```
(path/to/here/.venv)$ make install
(path/to/here/.venv)$ make install-dev
```
## 3. Check that everything is running properly. The `Makefile` comes with useful features:
```
$ make help
coverage run code coverage (% of code tested)
doc build documentation from docstring
help Show this help.
install-dev install development dependencies (for testing, linting etc.)
install install project dependencies (requirements.txt)
lint Check that your code follows the PEP8 standards
pipeline run main project pipeline
tests run unit tests
# start the tests:
$ make tests
```
## 4. Start coding!
Your code will go in the folder `transparency/`.
You can change your settings (where data is stored, database url / passwords)
in `transparency/settings/`:
- `.env` should contain **secret infos** (passwords)
- `base.py` or `dev.py` should contain the rest of the configuration
Read [Project Structure documentation](https://gitlab.com/quantmetry/qmtools/TemplateCookieCutter/blob/master/tutorials/organization.md) for more details.
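For illustration, a minimal sketch of reading a secret declared in `transparency/settings/.env` with `python-dotenv` (already pinned in `requirements.txt`); the variable name `DATABASE_PASSWORD` and the `.env` contents are only assumptions for this example:
```
# in transparency/settings/.env (never committed):
# DATABASE_PASSWORD=change-me

import os
from dotenv import load_dotenv

# load the .env file and export its entries as environment variables
load_dotenv(dotenv_path="transparency/settings/.env")
DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD")
```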
## 5. Check your Continuous-Integration setup
By default this project comes equipped with a `.gitlab-ci.yml` file that
defines a set of tests that are performed automatically by GitLab
every time code is pushed to the repo.
You can modify this file, remove or add steps. For more info read
[this tutorial](https://gitlab.com/quantmetry/qmtools/TemplateCookieCutter/blob/master/tutorials/integration.md)
You can read [more tutorials](https://gitlab.com/quantmetry/qmtools/TemplateCookieCutter/tree/master/tutorials)
for tips on git, continuous integration, linting, etc.
<file_sep>flake8==3.7.9
ipdb==0.12.2
jupyter==1.0.0
pylint==2.4.4
pytest==5.2.3
pytest-cov==2.8.1
scikit-learn==0.21.3
sphinx==2.2.1
sphinx_rtd_theme==0.4.3
shap==0.32.1
<file_sep># sorted in alphabetical order
pandas
python-dotenv
<file_sep># You can import another set of settings:
from .base import *
# You should create your own configuration file (that imports base or dev)
# Make sure to define what settings you are using the in the file settings/.env
# You can then define other settings or overwrite existing ones
# DATA_DIR = "/path/to/my/smaller/data/"
# maybe I am using my own database locally:
# DATABASE_INFOS = {
# 'host': 'localhost',
# 'port': 5432,
# 'username': 'postgre',
# 'password': <PASSWORD>('DATABASE_PASSWORD')
# }
<file_sep>"""
Contains all configuration for the project itself.
Should NOT contain any secrets.
>>> import settings
>>> settings.DATA_DIR
/Users/benjaminhabert/Documents/20170509_TaskForceDataEngineering_TemplateCode/data
"""
import os
import logging
# By default the data is stored in this repository's "data/" folder.
# You can change it in your own settings file.
REPO_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
DATA_DIR = os.path.join(REPO_DIR, 'data')
DATABASE_INFOS = {
'host': 'pyox1k01',
'port': 1521,
'service_name': 'EDW00_PP2',
'username': 'EDW_ANA',
'password': <PASSWORD>('<PASSWORD>'), # this was loaded by load_dotenv
'default_schema': 'EDW_QUA',
}
# Logging
LOGGING_FORMAT = '[%(asctime)s][%(levelname)s][%(module)s] %(message)s'
LOGGING_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
LOGGING_LEVEL = logging.DEBUG
logging.basicConfig(
format=LOGGING_FORMAT,
datefmt=LOGGING_DATE_FORMAT,
level=LOGGING_LEVEL
)
<file_sep>def compute_contributions(x, explainer, preprocessing=None):
"""
Compute Shapley contributions of a model against a prediction set.
Parameters
----------
x : pandas.DataFrame
Prediction set.
explainer : object
Any SHAP explainer already initialized with a model.
preprocessing : object, optional (default: None)
A single transformer, from sklearn or category_encoders
Returns
-------
    tuple
        A pair ``(shap_values, bias)``: the Shapley contributions of the model on the
        prediction set as computed by the explainer, together with the explainer's expected value.
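    Examples
    --------
    A minimal usage sketch; ``clf`` (a fitted tree-based classifier) and ``x_test``
    (a prediction DataFrame) are placeholder names for this illustration:

    >>> explainer = shap.TreeExplainer(clf)
    >>> shap_values, bias = compute_contributions(x_test, explainer)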
"""
if preprocessing is not None:
x = preprocessing.transform(x)
shap_values = explainer.shap_values(x)
bias = explainer.expected_value(x)
# TODO: check if contributions are between 0 and 1.
# If not, raise error and ask user to decompose sigmoid activated scores.
return shap_values, bias<file_sep>import unittest
from unittest.mock import Mock
from transparency.decomposition.contributions import compute_contributions
class TestContributions(unittest.TestCase):
def test_compute_contributions_1(self):
x = Mock()
explainer = Mock()
output = compute_contributions(x, explainer)
explainer.shap_values.assert_called()
explainer.expected_value.assert_called()
assert len(output) == 2
def test_compute_contributions_2(self):
x = Mock()
explainer = Mock()
preprocessing = Mock()
output = compute_contributions(x, explainer, preprocessing)
preprocessing.transform.assert_called()
explainer.shap_values.assert_called()
explainer.expected_value.assert_called()
assert len(output) == 2
<file_sep>import unittest
from transparency.utils.translate import translate
class TestTranslate(unittest.TestCase):
def test_translate(self):
elements = ['X_1', 'X_2']
        mapping = {'X_1': 'âge', 'X_2': 'profession'}
        output = translate(elements, mapping)
        expected = ['âge', 'profession']
self.assertListEqual(output, expected)
<file_sep>import unittest
import shap
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from transparency.decomposition.contributions import compute_contributions
class TestContributions(unittest.TestCase):
def setUp(self):
x, y = load_iris(return_X_y=True)
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(x, y, random_state=1)
def check(self, s, b, x_test):
assert len(s) == 3
assert len(b) == 3
for i in range(2):
assert s[i].shape == x_test.shape
assert 0 <= s[i].min().min() <= 1
assert 0 <= s[i].max().max() <= 1
assert 0 <= b[i] <= 1
def test_compute_contributions_1(self):
model = RandomForestClassifier(n_estimators=3)
model.fit(self.x_train, self.y_train)
explainer = shap.TreeExplainer(model)
s, b = compute_contributions(self.x_test, explainer)
self.check(s, b, self.x_test)
def test_compute_contributions_2(self):
model = GradientBoostingClassifier(n_estimators=3)
model.fit(self.x_train, self.y_train)
explainer = shap.TreeExplainer(model)
s, b = compute_contributions(self.x_test, explainer)
self.check(s, b, self.x_test)
def test_compute_contributions_3(self):
model = LogisticRegression()
model.fit(self.x_train, self.y_train)
explainer = shap.LinearExplainer(model, self.x_train)
s, b = compute_contributions(self.x_test, explainer)
self.check(s, b, self.x_test)
def test_compute_contributions_4(self):
model = SVC()
model.fit(self.x_train, self.y_train)
explainer = shap.KernelExplainer(model, self.x_train)
s, b = compute_contributions(self.x_test, explainer)
self.check(s, b, self.x_test)
def test_compute_contributions_5(self):
model = MLPClassifier(hidden_layer_sizes=(3, ))
model.fit(self.x_train, self.y_train)
explainer = shap.DeepExplainer(model, self.x_train)
s, b = compute_contributions(self.x_test, explainer)
self.check(s, b, self.x_test)
# TODO : idem for lightgbm, XGBoost, keras, pytorch
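    # A rough sketch of the lightgbm case from the TODO above; lightgbm is not part of
    # requirements.dev.txt, so this stays commented out as a hypothetical example:
    # def test_compute_contributions_6(self):
    #     import lightgbm as lgb
    #     model = lgb.LGBMClassifier(n_estimators=3)
    #     model.fit(self.x_train, self.y_train)
    #     explainer = shap.TreeExplainer(model)
    #     s, b = compute_contributions(self.x_test, explainer)
    #     self.check(s, b, self.x_test)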
<file_sep># CONFIGURATION
# Where to put HTML reports that you want to expose using a web interface
REPORTS_DIR = notebooks
SOURCE_DIR = transparency
# Documentation
DOCUMENTATION_OUTPUT = $(REPORTS_DIR)/documentation
APIDOC_OPTIONS = -d 1 --no-toc --separate --force --private
COVERAGE_OUTPUT = $(REPORTS_DIR)/coverage
COVERAGE_OPTIONS = --cov-config coverage/.coveragerc --cov-report term --cov-report html
# .PHONY is used to distinguish between a task and an existing folder
.PHONY: doc pipeline tests coverage data_tests
.DEFAULT_GOAL := help
# PRODUCTION COMMANDS
pipeline: install ## run main project pipeline
python $(SOURCE_DIR)/application/main.py
# DEVELOPMENT COMMANDS
doc: install-dev ## build documentation from docstring
rm -rf doc/source/generated
sphinx-apidoc $(APIDOC_OPTIONS) -o doc/source/generated/ $(SOURCE_DIR) $(SOURCE_DIR)/tests
cd doc; make html
mkdir -p $(DOCUMENTATION_OUTPUT)
cp -r doc/build/html/* $(DOCUMENTATION_OUTPUT)
tests: install-dev ## run unit tests
pytest -s tests/unit_tests/
pytest -s tests/integration_tests/
coverage: install-dev ## run code coverage (% of code tested)
py.test $(COVERAGE_OPTIONS) --cov=$(SOURCE_DIR) tests/unit_tests/ | tee coverage/coverage.txt
mv -f .coverage coverage/.coverage # don't know how else to move it
mkdir -p $(COVERAGE_OUTPUT)
cp -r coverage/htmlcov/* $(COVERAGE_OUTPUT)
lint: install-dev ## Check that your code follows the PEP8 standards
flake8 $(SOURCE_DIR)
# PROJECT SETUP COMMANDS
install: requirements.txt ## install project dependencies (requirements.txt)
pip install -r requirements.txt
touch install
install-dev: install requirements.dev.txt ## install development dependencies (for testing, linting etc.)
pip install -r requirements.dev.txt
touch install-dev
init: ## initiate virtual environment
bash init.sh
touch init
# OTHER
help: ## Show this help.
# @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//'
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
<file_sep>import logging
from .base import *
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
|
54c5ff34abe643601ff850b51a8abc6daa6e188f
|
[
"Markdown",
"Python",
"Text",
"Makefile"
] | 11 |
Markdown
|
gmartinon/transparency
|
3173d6dd52283faae3d3562cbdb1f8dad2098947
|
b393ff0c64077a1dbfb55030a5825e0118299a86
|
refs/heads/master
|
<file_sep><?php
namespace Beesperester\WebDav\Response;
interface ResponseInterface
{
public function __construct($content = '', $status = 200, $headers = array());
}
<file_sep><?php
namespace Beesperester\WebDav\Response;
// Illumiante
use Illuminate\Http\Response as IlluminateResponse;
use Illuminate\Http\Request as IlluminateRequest;
abstract class Response extends IlluminateResponse implements ResponseInterface
{
/**
* @var SimpleXMLElement $xml
*/
public $xml;
/**
* Create new response from request
*
* @param Illuminate\Http\Request
*
* @return Response
*/
    public static function fromRequest(IlluminateRequest $request)
{
$xml = simplexml_load_string($request->getContent());
$response = new static();
$response->xml = $xml;
return $response;
}
}
<file_sep><?php
// configure error reporting
error_reporting(E_ALL);
ini_set('display_errors', 1);
// require dependencies
require_once __DIR__.'/../vendor/autoload.php';
// Carbon
use Carbon\Carbon;
// Illuminate
use Illuminate\Http\Request;
// WebDav
use Beesperester\WebDav\WebDav;
use Beesperester\WebDav\Response\Propfind;
use Beesperester\WebDav\FS\Collection;
use Beesperester\WebDav\Exception\InvalidMethodException;
// basic server logic
$request = Request::createFromGlobals();
$path = $request->path();
$url = 'http://ubuntu.local';
$virtual_fs = WebDav::createCollection([
'displayname' => 'root',
'root' => true,
'base_url' => $url,
'creationdate' => Carbon::now(),
'getlastmodified' => Carbon::now(),
'children' => [
WebDav::createCollection([
'displayname' => 'foo',
'base_url' => $url,
'creationdate' => Carbon::now(),
'getlastmodified' => Carbon::now(),
'children' => [
WebDav::createCollection([
'displayname' => 'bar',
'base_url' => $url.'/foo',
'creationdate' => Carbon::now(),
'getlastmodified' => Carbon::now(),
]),
],
]),
WebDav::createFile([
'displayname' => 'foo.txt',
'base_url' => $url,
'creationdate' => Carbon::now(),
'getlastmodified' => Carbon::now(),
'getcontenttype' => 'text/plain',
'getcontentlength' => 123456,
]),
],
]);
function flatten(Collection $collection)
{
$flat_collection = [$collection->getUri() => $collection];
foreach ($collection->children as $resource) {
if ($resource instanceof Collection) {
$flat_collection = array_merge($flat_collection, flatten($resource));
} else {
$flat_collection[$resource->getUri()] = $resource;
}
}
return $flat_collection;
}
$flat_fs = flatten($virtual_fs);
//echo '<pre>'; print_r($flat_fs); die();
//echo '<pre>'; print_r(array_keys($flat_fs)); die();
/*if (in_array($request->fullUrl(), array_keys($flat_fs)))
{
$propfind = Propfind::fromCollection($flat_fs[$request->fullUrl()]);
$propfind->send();
}*/
WebDav::handleRequest($request, function ($request) {
switch (strtolower($request->method())) {
case 'get':
return;
case 'options':
return;
case 'propfind':
return Propfind::fromRequest($request);
}
throw new InvalidMethodException();
});
<file_sep><?php
namespace Beesperester\WebDav\Request;
// WebDav
use Beesperester\WebDav\Request\Request;
// SimpleXML
use \SimpleXMLElement;
// Illuminate
use Illuminate\Http\Response as IlluminateResponse;
class Get extends Request
{
/**
* Handle the request and return response
*
* @return Illuminate\Http\Response
*/
public function handle()
{
return IlluminateResponse::create(Null, 200)
->header('DAV', 1);
}
}
<file_sep><?php
function simplexml_append(SimpleXMLElement $to, SimpleXMLElement $from) {
$to_dom = dom_import_simplexml($to);
$from_dom = dom_import_simplexml($from);
$to_dom->appendChild($to_dom->ownerDocument->importNode($from_dom, true));
}
function simplexml_format(SimpleXMLElement $xml)
{
$dom = dom_import_simplexml($xml)
->ownerDocument;
$dom->formatOutput = true;
return $dom->saveXML();
}<file_sep><?php
namespace Beesperester\WebDav\FS;
// SimpleXML
use \SimpleXMLElement;
// WebDav
use Beesperester\WebDav\FS\Resource;
class Collection extends Resource
{
/**
* Array of children of collection
*
* @var array $children
*/
public $children;
/**
* Is collection the root?
*
* @var boolean $root
*/
public $root;
/**
* Create new collection from array
*
* @param array $data
*
* @return Collection
*/
public static function fromData(array $data)
{
$defaults = [
'base_url' => '',
'creationdate' => Null,
'displayname' => '',
'getlastmodified' => Null,
'root' => False,
'children' => []
];
$data = array_merge($defaults, $data);
$collection = new static($data['displayname'], $data['base_url'], $data['creationdate'], $data['getlastmodified']);
$collection->children = $data['children'];
$collection->root = $data['root'];
return $collection;
}
/**
* Get uri for resource
*
* @return string
*/
public function getUri()
{
$parts = [$this->base_url];
// add displayname if collection is not root
if (!$this->root)
{
$parts[] = $this->displayname;
}
// return href with trailing slash
return implode('/', array_filter($parts));
}
/**
* Extend function to add resourcetype element
* <response>
* [...]
* <propstat>
* <prop>
* [...]
* <resourcetype>
* <collection />
* </resourcetype>
* </prop>
* </propstat>
* </response>
*
* @return SimpleXMLElement;
*/
public function toXml()
{
$response = parent::toXml();
// add resourcetype element
$known_props = $response->xpath('/response/propstat/prop[@proptype="known"]');
if ($known_props) {
$resourcetype = new SimpleXMLElement('<resourcetype/>');
$resourcetype->addChild('collection');
simplexml_append($known_props[0], $resourcetype);
}
return $response;
}
}<file_sep><?php
namespace Beesperester\WebDav\FS;
// SimpleXML
use \SimpleXMLElement;
// WebDav
use Beesperester\WebDav\FS\Resource;
class File extends Resource
{
/**
* @var int
*/
public $getcontentlength;
/**
* @var string
*/
public $getcontenttype;
/**
* Extra attributes to look for when converting to xml
*
* @var array $include_attributes
*/
public $include_attributes = [
'getcontentlength',
'getcontenttype'
];
/**
* Create new file from array.
*
* @param array $data
*
* @return File
*/
public static function fromData(array $data)
{
$defaults = [
'base_url' => '',
'creationdate' => null,
'displayname' => '',
'getcontentlength' => 0,
'getcontenttype' => Null,
'getlastmodified' => null,
];
$data = array_merge($defaults, $data);
$file = new static($data['displayname'], $data['base_url'], $data['creationdate'], $data['getlastmodified']);
$file->getcontenttype = $data['getcontenttype'];
$file->getcontentlength = $data['getcontentlength'];
return $file;
}
}<file_sep><?php
namespace Beesperester\WebDav\Request;
interface RequestInterface
{
/**
* Handle the request and return response
*
* @return SimpleXMLElement
*/
public function handle();
}<file_sep><?php
namespace Beesperester\WebDav\FS;
interface ResourceInterface
{
/**
* Compile resource data to valid xml
*
* @return \SimpleXMLElement
*/
public function toXml();
/**
* Get uri for resource
*
* @return string
*/
public function getUri();
}
<file_sep><?php
namespace Beesperester\WebDav;
// Illuminate
use Illuminate\Http\Request;
#use Illuminate\Http\Response;
// Collection
use Beesperester\WebDav\FS\Collection;
use Beesperester\WebDav\FS\File;
// Request
use Beesperester\WebDav\Request\Get;
use Beesperester\WebDav\Request\Options;
use Beesperester\WebDav\Request\Propfind;
// Response
#use Beesperester\WebDav\Response\Collection;
// Exception
use Beesperester\WebDav\Exception\InvalidMethodException;
class WebDav
{
/**
* Handle request by WebDav client and return appropriate response
*
* @param Illuminate\Http\Request $request
*
* @return Illuminate\Http\Response
*/
public static function handleRequest(Request $request, $callback)
{
return $callback($request);
}
/**
* Handle propfind request and return response
*
* @return Illuminate\Http\Response
*/
public static function propfind($callback)
{
$request = Request::createFromGlobals();
return $callback($request->path());
}
public static function createCollection(array $data)
{
return Collection::fromData($data);
}
public static function createFile(array $data)
{
return File::fromData($data);
}
}
<file_sep><?php
namespace Beesperester\WebDav\Response;
// WebDav
use Beesperester\WebDav\FS\Collection;
class Propfind extends Response
{
/*public function __construct($content = '', $status = 200, $headers = array())
{
parent::__construct($content, $status, $headers);
}*/
public static function fromCollection(Collection $collection = null)
{
$xml = new \SimpleXMLElement('<multistatus/>');
$xml->addAttribute('xmlns', 'DAV:');
if ($collection) {
simplexml_append($xml, $collection->toXml());
if ($collection->children) {
foreach ($collection->children as $resource) {
simplexml_append($xml, $resource->toXml());
}
}
}
$content = simplexml_format($xml);
$status = 207;
$headers = [
'DAV' => 1,
];
return new static($content, $status, $headers);
}
}
<file_sep><?php
namespace Beesperester\WebDav\Request;
// SimpleXML
use \SimpleXMLElement;
abstract class Request implements RequestInterface
{
/**
* The request path
*
* @var string $request_path
*/
public $request_path;
/**
* The request body
*
* @var string $request_body
*/
public $request_body;
/**
* Construct new request from request path and request body
*
* @param string $request_path
* @param string $request_body
*/
public function __construct($request_path = '/', $request_body = '')
{
$this->request_body = $request_body;
$this->request_path = $request_path;
}
/**
* Create Request from path and body
*
* @param string $request_path
* @param string $request_body
*
* @return Request
*/
public static function create($request_path = '/', $request_body = '')
{
return new static($request_path, $request_body);
}
/**
* Handle the request and return response
*
* @return Illuminate\Http\Response
*/
abstract public function handle();
}
<file_sep># php-webdav
PHP-based library for creating WebDAV-compatible responses that represent real or virtual filesystems.
<file_sep><?php
namespace Beesperester\WebDav\Exception;
use \Exception;
class InvalidMethodException extends Exception implements ExceptionInterface
{
}<file_sep><?php
namespace Beesperester\WebDav\Exception;
interface ExceptionInterface
{
}<file_sep><?php
namespace Beesperester\WebDav\FS;
// Carbon
use Carbon\Carbon;
// SimpleXML
use \SimpleXMLElement;
// WebDav
use Beesperester\WebDav\FS\ResourceInterface;
abstract class Resource implements ResourceInterface
{
/**
* @var string $base_url
*/
public $base_url;
/**
* @var Carbon\Carbon $creationdate
*/
public $creationdate;
/**
* @var string $displayname
*/
public $displayname;
/**
* @var Carbon\Carbon $getlastmodified
*/
public $getlastmodified;
/**
* Attributes to look for when converting to xml
*
* @var array $base_attributes
*/
protected $base_attributes = [
'creationdate',
'displayname',
'getlastmodified'
];
/**
* Extra attributes to look for when converting to xml
*
* @var array $include_attributes
*/
public $include_attributes = [];
public function __construct($displayname, $base_url, Carbon $creationdate = Null, Carbon $getlastmodified = Null)
{
$this->base_url = $base_url;
$this->creationdate = $creationdate;
$this->displayname = $displayname;
$this->getlastmodified = $getlastmodified;
}
/**
* Get uri for resource
*
* @return string
*/
public function getUri()
{
$parts = [$this->base_url, $this->displayname];
return implode('/', array_filter($parts));
}
/**
* Get resource attributes
*
* @return array
*/
public function getAttributes()
{
return array_unique(array_merge($this->base_attributes, $this->include_attributes));
}
/**
* Create valid WebDAV xml response
* for known and unknown props
*
* <response>
* <href>http://example.com/resource</href>
* <propstat>
* <prop>
* <displayname>resource name</displayname>
* <creationdate>Date - Time</creationdate>
* </prop>
* </propstat>
* <propstat>
* <prop>
* <getlastmodified/>
* </prop>
* </propstat>
* </response>
*
* @return SimpleXMLElement;
*/
public function toXml()
{
$response = new SimpleXMLElement('<response/>');
// add href
$response->addChild('href', $this->getUri());
// known attributes
$known = array_filter($this->getAttributes(), function($attribute) {
return !empty($this->$attribute);
});
if ($known) {
// add propstat element
$propstat = $response->addChild('propstat');
// add prop element
$prop = $propstat->addChild('prop');
$prop->addAttribute('proptype', 'known');
foreach($known as $attribute) {
// add attribute
if ($this->$attribute instanceof Carbon) {
// add creationdate in Iso 8601 format
if ($attribute === 'creationdate') {
$prop->addChild($attribute, $this->$attribute->toIso8601String());
}
// add getlastmodified in Rfc 1036 format
if ($attribute === 'getlastmodified') {
$prop->addChild($attribute, $this->$attribute->toRfc1036String());
}
} else {
$prop->addChild($attribute, $this->$attribute);
}
}
}
// unknown attributes
$unknown = array_diff($this->getAttributes(), $known);
if ($unknown) {
// add propstat element
$propstat = $response->addChild('propstat');
// add prop element
$prop = $propstat->addChild('prop');
$prop->addAttribute('proptype', 'unknown');
foreach($unknown as $attribute) {
$prop->addChild($attribute);
}
}
return $response;
}
}
<file_sep><?php
namespace Beesperester\WebDav\Request;
// WebDav
use Beesperester\WebDav\Request\Request;
// SimpleXML
use \SimpleXMLElement;
// Illuminate
use Illuminate\Http\Response as IlluminateResponse;
class Options extends Request
{
public static $methods = [
'COPY',
'DELETE',
'GET',
'HEAD',
'MKCOL',
'MOVE',
'OPTIONS',
'POST',
'PROPFIND',
'PROPPATCH',
'PUT'
];
/**
* Handle the request and return response
*
* @return Illuminate\Http\Response
*/
public function handle()
{
return IlluminateResponse::create(Null, 200)
            ->header('Allow', strtoupper(implode(' ', static::$methods)))
->header('DAV', 1);
}
}
<file_sep><?php
namespace Beesperester\WebDav\Request;
// WebDav
use Beesperester\WebDav\Request\Request;
// SimpleXML
use \SimpleXMLElement;
// Illuminate
use Illuminate\Http\Response as IlluminateResponse;
class Propfind extends Request
{
protected $collection = [
[
'displayname' => 'some_dir',
'href' => '/some_dir',
'collection' => []
],
[
'displayname' => 'some_other_dir',
'href' => '/some_other_dir',
'collection' => [
[
'displayname' => 'some_sub_dir',
'href' => '/some_other_dir/some_sub_dir'
]
]
]
];
/**
* Handle the request and return response
*
* @return Illuminate\Http\Response
*/
public function handle()
{
$xml = new \SimpleXMLElement('<multistatus/>');
$xml->addAttribute('xmlns', 'DAV:');
$response = $xml->addChild('response');
$href = $response->addChild('href', '/');
// known
$propstat = $response->addChild('propstat');
$prop = $propstat->addChild('prop');
$prop->addChild('displayname', 'root');
$prop->addChild('resourcetype')->addChild('collection');
foreach ($this->collection as $collection) {
$response = $xml->addChild('response');
$href = $response->addChild('href', $collection['href']);
// known
$propstat = $response->addChild('propstat');
$prop = $propstat->addChild('prop');
$prop->addChild('displayname', $collection['displayname']);
$prop->addChild('resourcetype')->addChild('collection');
}
$content = simplexml_format($xml);
return IlluminateResponse::create($content, 207)
->header('DAV', 1);
}
}
|
e19562cf183162f0da49b5cbf9bbe29561965d8c
|
[
"Markdown",
"PHP"
] | 18 |
PHP
|
beesperester/php-webdav
|
479794a93f100197e737a9b4a1309ac5b5f9db04
|
da46066a30f0f201f61076f132897c111b50f839
|
refs/heads/master
|
<repo_name>5412/swoole-test<file_sep>/coroutine-channel-test/pool.php
<?php
class RedisPool {
protected $pool;
function __construct($size = 10)
{
$this->pool = new \Swoole\Coroutine\Channel($size);
for ($i = 0; $i < $size; $i++) {
$redis = new \Swoole\Coroutine\Redis();
$res = $redis->connect('127.0.0.1', 6379);
if (false === $res) {
throw new RuntimeException('failed to connect redis server');
} else {
$this->put($redis);
}
}
}
function put($redis)
{
if ($redis instanceof \Swoole\Coroutine\Redis) {
$this->stats();
$this->pool->push($redis);
} else {
throw new RuntimeException('must push a \Swoole\Coroutine\Redis implementation');
}
}
function get()
{
return $this->pool->pop();
}
function stats()
{
var_export($this->pool->stats());
}
}
//$server = new swoole_http_server('0.0.0.0', 9999);
//$server->on('WorkerStart', function () {
// $pool = new RedisPool(10);
//
// $redis = $pool->get();
//
// $list = $redis->get('clientList');
// var_dump($list);
//});
//
//$server->on('request', function ($request, $response) {
// $response->end('hi ha hi');
//});
//
//$server->start();
go(function () {
$pool = new RedisPool(10);
$redis = $pool->get();
$list = $redis->get('clientList');
$pool->stats();
//var_dump($list);
});
<file_sep>/socket-server-test/socketServer.php
<?php
$socket = new Swoole\Coroutine\Socket(AF_INET, SOCK_STREAM, 0);
$socket->bind('127.0.0.1', 9001);
$socket->listen(128);
go(function () use($socket) {
while (true) {
echo "Accept: \n";
$client = $socket->accept();
if ($client === false) {
var_dump($socket->errCode);
} else {
var_dump($socket);
$str = $client->recv();
var_dump($str);
$res = $client->send('12312');
var_dump($res);
}
}
});<file_sep>/coroutine-asyc-call-test/conCallClient.php
<?php
echo 'script begin', PHP_EOL;
go(function () {
echo 'setDefer' , PHP_EOL;
$beginTime = microtime(1);
    // number of concurrent requests
$n = 1000;
for ($i = 0; $i < $n; $i++) {
$cli = new Swoole\Coroutine\Http\Client('127.0.0.1', 8081);
$cli->setHeaders([
'Host' => "local.ad.oa.com",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->set([ 'timeout' => 2]);
$cli->setDefer();
$cli->get('/');
$clients[] = $cli;
}
for ($i = 0; $i < $n; $i++) {
$r = $clients [$i]->recv();
$clients [$i]->close();
$result[] = $clients[$i]->body;
}
//$str = var_export($result);
$endTime = microtime(1);
echo 'timing: ', $endTime - $beginTime, PHP_EOL;
});<file_sep>/coroutine-redis-test/redisClient.php
<?php
const REDIS_SERVER_HOST = '127.0.0.1';
const REDIS_SERVER_PORT = 6379;
//go(function () {
// $redis = new Swoole\Coroutine\Redis();
// $res = $redis->connect(REDIS_SERVER_HOST, REDIS_SERVER_PORT);
//
// $redis->setDefer();
// $redis->set('key1', 'value');
// $redis2 = new Swoole\Coroutine\Redis();
// $redis2->connect(REDIS_SERVER_HOST, REDIS_SERVER_PORT);
// $redis2->setDefer();
// $redis2->get('key1');
// $result1 = $redis->recv();
// $result2 = $redis2->recv();
//
// var_dump($result1, $result2);
//});
//
//go(function () {
// //co::sleep(1);
// $redis = new Swoole\Coroutine\Redis();
// $redis->connect(REDIS_SERVER_HOST, REDIS_SERVER_PORT);
// $redis->setDefer();
// $redis->set('key1', 'value1');
// $redis->get('key1');
// $result1 = $redis->recv();
// $result2 = $redis->recv();
//
// var_dump($result1, $result2);
//});
go(function () {
$redis = new Swoole\Coroutine\Redis();
$res = $redis->connect(REDIS_SERVER_HOST, REDIS_SERVER_PORT);
if (!$res) {
echo 'Can\'t connect host' . REDIS_SERVER_HOST . ':' . REDIS_SERVER_PORT. PHP_EOL;
return;
}
//$redis->setDefer();
$value = $redis->get('key1');
$redis->lPush('l1', 'A', 'B', 'C');
$list = $redis->lGet('l1', 2);
$list = $redis->lRange('l1', 0, -1);
//$value = $redis->recv();
var_dump($list);
});
echo 'main script', PHP_EOL;
<file_sep>/coroutine-http-client-test/webSocketClient.php
<?php
echo 'main script', PHP_EOL;
go(function () {
$cli = new Swoole\Coroutine\Http\Client("localhost", 9502);
$ret = $cli->upgrade("/");
var_dump($ret);
echo $ret, PHP_EOL;
if ($ret) {
$cli->push("hello");
echo '1', PHP_EOL;
var_dump($cli->recv());
co::sleep(0.1);
while (1) {
var_dump($cli->recv());
}
}
});
<file_sep>/timer/index.php
<?php
echo microtime(1), 'outside', PHP_EOL;
echo 1, PHP_EOL;
Swoole\Event::defer(function () {
echo microtime(1), 'inside defer', PHP_EOL;
echo "hello\n";
});
echo 2, PHP_EOL;
echo microtime(1), 'outside', PHP_EOL;
$count = new \Swoole\Atomic(0);
Swoole\Timer::tick(1000, function ($timer_id, $a, $b) use ($count) {
echo microtime(1), 'inside tick', PHP_EOL;
echo $count->get(), PHP_EOL;
// if ($count->get() === 10) {
// \Swoole\Timer::clear($timer_id);
//    } // clearing the timer here still lets the current callback run to completion
echo 'timer: ', $timer_id, ' start at: ', microtime(1), PHP_EOL;
$c = $count->get();
Swoole\Timer::tick(3000, function () use ($c) {
echo microtime(1), 'inside tick2', PHP_EOL;
echo $c, " inside loop.\n";
$args = func_get_args();
\Swoole\Timer::clear($args[0]);
});
echo 'a: ', $a, ' b: ', $b, PHP_EOL;
if ($count->get() === 3) {
\Swoole\Timer::clear($timer_id);
}
$count->add(1);
echo microtime(1), 'inside tick', PHP_EOL;
}, 1, 2);
echo microtime(1), 'outside', PHP_EOL;
sleep(10); // blocking here delays the timer callbacks from firing
echo microtime(1), 'outside', PHP_EOL;
<file_sep>/server/taskWait.php
<?php
$server = new \Swoole\Server('127.0.0.1', 10000);
$server->set([
'worker_num' => 2,
    'task_worker_num' => 4, // task() cannot be used unless task worker processes are configured
]);
$server->on('connect', function (swoole_server $server, $fd) {
});
$server->on('receive', function (swoole_server $server, $fd, $from_id, $data) {
echo 'Receive data:', $data, PHP_EOL;
$data = trim($data);
$task_id = $server->task($data, 1);
    $server->send($fd, "Dispatched task, task id: $task_id\n");
    $tasks[] = mt_rand(1000, 9999); // task 1
    $tasks[] = mt_rand(1000, 9999); // task 2
    $tasks[] = mt_rand(1000, 9999); // task 3
    var_dump($tasks);
    // wait for all task results, with a timeout of 10 seconds
    $results = $server->taskWaitMulti($tasks, 10.0);
if (!isset($results[0])) {
        echo "Task 1 timed out\n";
}
if (isset($results[1])) {
        echo "The result of task 2 is {$results[1]}\n";
}
if (isset($results[2])) {
        echo "The result of task 3 is {$results[2]}\n";
}
});
$server->on('close', function () {
});
$server->on('task', function (swoole_server $server, $task_id, $from_id, $data) {
echo $from_id, 'Task receive data: ', $data, PHP_EOL;
echo "#{$server->worker_id}\tonTask: [PID={$server->worker_pid}]: task_id=$task_id, data_len=".strlen($data).".".PHP_EOL;
$server->finish($data);
});
$server->on('finish', function (swoole_server $server, $task_id, $data) {
echo $task_id, 'task is finished, data: ', $data, PHP_EOL;
//$server->send($task_id, 'task finished');
});
$server->on('workerStart', function ($serv, $worker_id) {
// global $argv;
// if($worker_id >= $serv->setting['worker_num']) {
// swoole_set_process_name("php {$argv[0]}: task_worker");
// } else {
// swoole_set_process_name("php {$argv[0]}: worker");
// }
});
$server->start();<file_sep>/server/tcpClient.php
<?php
$client = new swoole_client(SWOOLE_SOCK_TCP, SWOOLE_SOCK_ASYNC);
$client->on("connect", function($cli) {
$cli->send("hello wo2rld\n");
});
$client->on("receive", function($cli, $data){
echo "received: {$data}\n";
//$cli->send('task');
//$cli->close();
});
$client->on("error", function($cli){
echo "connect failed\n";
});
$client->on("close", function($cli){
echo "connection close\n";
});
$res = $client->connect("127.0.0.1", 9515, 1);
if (false === $res) {
echo $client->errCode;
}
<file_sep>/index.php
<?php
$http = new swoole_http_server('0.0.0.0', 8081);
$http->on('request', function ($request, $response) {
//var_dump($request->get, $request->post);
$response->header("Content-Type", "text/html; charset=utf-8");
//sleep(1);
    $response->end("<h1>Hello World</h1>"
        . "Download the latest stable release from the pecl site, unpack it, then run:\n"
        . "phpize\n"
        . "./configure --enable-async-redis --enable-mysqlnd --enable-coroutine --enable-openssl --with-openssl-dir=/usr/local/opt/openssl --enable-sockets\n"
        . "Note: --enable-async-redis requires hiredis to be installed first (on macOS, install hiredis).\n"
        . "Note: enabling http2 requires nghttp2 to be installed first.\n"
        . "Note: install nghttp2 with: wget https://github.com/nghttp2/nghttp2/releases/download/v1.30.0/nghttp2-1.30.0.tar.bz2 && tar -jxvf nghttp2-1.30.0.tar.bz2 && cd nghttp2-1.30.0 && ./configure && make && make install (run the steps separately if the one-liner fails)\n"
        . "make && make install");
});
$http->start();
<file_sep>/server/socketServer.php
<?php
$server = new \Swoole\Server('127.0.0.1', 10003, SWOOLE_PROCESS, SWOOLE_SOCK_TCP);
$server->set([
'worker_num' => 4,
//'daemonize' => true,
'backlog' => 128,
]);
//$server->addlistener("127.0.0.1", 9502, SWOOLE_SOCK_TCP);
//$server->addlistener("172.24.42.134", 9503, SWOOLE_SOCK_TCP);
//$server->addlistener("0.0.0.0", 9506, SWOOLE_SOCK_UDP);
////UnixSocket Stream
//$server->addlistener("/Users/bjhl/sockets/myserv.sock", 0, SWOOLE_UNIX_STREAM);
////TCP + SSL
////$server->addlistener("127.0.0.1", 9505, SWOOLE_SOCK_TCP | SWOOLE_SSL);
$server->on('WorkerStart', function($serv, $workerId) {
    echo $workerId, PHP_EOL; // files loaded before the worker process started cannot be picked up by reload
});
$server->on('start', function () use ($server) {
echo 'manager pid is ', $server->manager_pid, PHP_EOL;
echo 'master pid is ', $server->master_pid, PHP_EOL;
});
$server->on('connect', function ($server, $fd) {
echo 'someone connect us' , $fd, PHP_EOL;
});
$server->on('packet', function () {
echo 'someone packet us';
});
$server->on('receive', function ($server, $fd, $reactor_id, $data) {
echo 'receive something', $data, PHP_EOL;
$server->send($fd, "Swoole: {$data}");
//$server->close($fd);
});
$server->on('close', function ($server, $fd) {
echo 'someone close', $fd, PHP_EOL;
});
$server->start();<file_sep>/http-response-test/httpResponseDetachTestServer.php
<?php
$http = new swoole_http_server('0.0.0.0', 8001, SWOOLE_PROCESS);
$http->set([
'task_worker_num' => 1,
'worker_num' => 1,
    'package_max_length' => 1024, // POST size limit
    'upload_tmp_dir' => '/data/uploadfiles/',
    'http_parse_post' => false, // automatically parse x-www-form-urlencoded request bodies into the POST array
    'http_parse_cookie' => false, // disable cookie parsing and keep the raw, unprocessed Cookie header
    // gzip, br and deflate are currently supported; the server picks the compression method
    // from the client's Accept-Encoding header. Chunked (http-chunk) responses cannot be
    // compressed per chunk, so compression is forcibly disabled for them.
    'http_compression' => true,
    // With document_root set and enable_static_handler enabled, an incoming HTTP request is
    // first checked against document_root; if the file exists its content is sent to the
    // client directly and the onRequest callback is not triggered.
    'document_root' => '/data/webroot/example.com', // must be an absolute path on v4.4.0 and above
    'enable_static_handler' => true,
    // Paths served by the static handler. For example /static/test.jpg is resolved against
    // $document_root/static/test.jpg; if the file exists it is served, otherwise 404 is returned.
    "static_handler_locations" => ['/static', '/app/images'],
]);
$http->on('request', function (swoole_http_request $request, swoole_http_response $response) use ($http) {
try {
$response->detach();
var_export($response->fd);
$http->task(strval($response->fd));
} catch (Exception $e) {
$response->end($e->getMessage());
}
});
$http->on('finish', function ($data)
{
echo "task finish";
});
$http->on('task', function ($serv, $task_id, $worker_id, $data)
{
$resp = Swoole\Http\Response::create($data);
$resp->end("in task");
echo "async task\n";
    $serv->finish('1'); // finish() must be called inside the onTask callback
});
$http->start();<file_sep>/server/webSocketServer.php
<?php
$server = new swoole_websocket_server("127.0.0.1", 9502);
$server->set([
'enable_static_handler' => true,
'document_root' => __DIR__ . '/web',
]);
$server->on('message', function($server, $frame) {
echo "received message: {$frame->data}\n";
$server->push($frame->fd, json_encode(["hello", "world"]));
});
$server->on('close', function($server, $fd) {
echo "connection close: {$fd}\n";
});
$tcp = $server->listen("0.0.0.0", 9515, SWOOLE_SOCK_TCP);
$tcp->set([
'open_length_check' => true,
'package_max_length' => 2 * 1024 * 1024,
'package_length_type' => 'N',
'package_body_offset' => 16,
'package_length_offset' => 0,
]);
$server->on("open", function ($serv, $req) {
echo "new WebSocket Client, fd={$req->fd}\n";
});
$tcp->on('receive', function ($server, $fd, $reactor_id, $data) {
echo 'here', PHP_EOL;
$body = substr($data, 0);
$value = swoole_serialize::unpack($body);
    // only iterate over the connections of the WebSocket listener port
$websocket = $server->ports[0];
foreach ($websocket->connections as $_fd)
{
if ($server->exist($_fd))
{
$server->push($_fd, json_encode($value));
}
}
});
$server->start();<file_sep>/server/taskCo.php
<?php
$server = new swoole_http_server('127.0.0.1', 8989);
$server->set([
'worker_num' => 5,
'task_worker_num' => 2,
]);
$server->on('task', function (swoole_server $server, $task_id, $worker_id, $data) {
echo "#{$server->worker_id}\tonTask: worker_id={$worker_id} | {$server->worker_id}, task_id=$task_id\n";
$closeFdArrary = $server->heartbeat();
var_export($closeFdArrary);
if ($server->worker_id == 1) {
sleep(1);
}
return $data;
});
$server->on('request', function (swoole_http_request $request, swoole_http_response $response) use ($server) {
$tasks[0] = 'hello world';
$tasks[1] = [
'data' => 1234,
'code' => 200,
];
$socket = $server->getSocket();
if (!socket_set_option($socket, SOL_SOCKET, SO_REUSEADDR, 1)) {
echo 'Unable to set option on socket: '. socket_strerror(socket_last_error()) . PHP_EOL;
}
$result = $server->taskCo($tasks, 0.5);
$response->end('Test End, Result: ' . var_export($result, 1));
});
$server->start();<file_sep>/coroutine-http-client-test/client.php
<?php
use Swoole\Coroutine\Http\Client;
go(function () {
$cli = new Client('127.0.0.1', 8000);
$cli->set([
'timeout' => 3.0,
'keep_alive' => false,
]);
$cli->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->setCookies(['user' => 'solar']);
//$cli->setMethod('POST');
    $cli->setData('foo=bar&bar=foo'); // setting a body automatically turns the request into a POST with raw data
$cli->addFile('./test.txt', 'file', 'application/text', 'ha.txt', 1,1);
$cli->addData('dasdas', 'file1', 'application/text', 'hah.txt');
$cli->execute('/index.php');
echo $cli->errCode, PHP_EOL;
echo $cli->statusCode, PHP_EOL;
echo $cli->body, PHP_EOL;
$cli->post('/post.php', array("a" => '1234', 'b' => '456'));
echo $cli->body;
$cli->close();
$cli->get('/solar.php');
echo $cli->errCode, PHP_EOL;
echo $cli->statusCode, PHP_EOL;
echo $cli->body, PHP_EOL;
});
go(function () {
$cli = new Client('127.0.0.1', 8000);
$cli->set([
'timeout' => 3.0,
'keep_alive' => false,
]);
$cli->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->setCookies(['user' => 'solar']);
//$cli->setMethod('POST');
    $cli->setData('foo=bar&bar=foo'); // setting a body automatically turns the request into a POST with raw data
$cli->addFile('./test.txt', 'file', 'application/text', 'ha.txt', 1,1);
$cli->addData('dasdas', 'file1', 'application/text', 'hah.txt');
$cli->execute('/index.php');
echo $cli->errCode, PHP_EOL;
echo $cli->statusCode, PHP_EOL;
echo $cli->body, PHP_EOL;
});
go(function () {
$cli = new Client('127.0.0.1', 8000);
$cli->set([
'timeout' => 3.0,
'keep_alive' => false,
]);
$cli->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->setCookies(['user' => 'solar']);
$cli->post('/post.php', array("a" => '1234', 'b' => '456'));
echo $cli->body;
$cli->close();
});
go(function () {
$cli = new Client('127.0.0.1', 8000);
$cli->set([
'timeout' => 3.0,
'keep_alive' => false,
]);
$cli->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->setCookies(['user' => 'solar']);
$cli->get('/solar.php');
echo $cli->errCode, PHP_EOL;
echo $cli->statusCode, PHP_EOL;
echo $cli->body, PHP_EOL;
});
go(function () {
$cli = new Swoole\Coroutine\Http\Client('127.0.0.1', 8000);
$cli->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->set([ 'timeout' => 1]);
$cli->setDefer();
$cli->get('/');
echo $cli->body, PHP_EOL;
co::sleep(1);
$data = $cli->recv();
echo $data, PHP_EOL;
echo $cli->body, PHP_EOL;
});
go(function () {
$host = 'www.swoole.com';
$cli = new \Swoole\Coroutine\Http\Client($host, 443, true);
$cli->set(['timeout' => -1]);
$cli->setHeaders([
'Host' => $host,
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => '*',
'Accept-Encoding' => 'gzip'
]);
$cli->download('/static/files/swoole-logo.svg', __DIR__ . '/logo.svg');
});
echo 'main script', PHP_EOL;
<file_sep>/timer/after.php
<?php
echo microtime(1), 'outside', PHP_EOL;
$str = 'solar';
Swoole\Timer::after(1000, function() use ($str) {
echo microtime(1), 'inside', PHP_EOL;
echo microtime(1), "timeout, $str\n";
//co::sleep(10);
sleep(1);
echo microtime(1), 'inside', PHP_EOL;
});
echo microtime(1), 'outside', PHP_EOL;
sleep(2); // blocking here delays the timer callback
echo microtime(1), 'outside', PHP_EOL;<file_sep>/server/tick.php
<?php
$server = new \Swoole\Server('127.0.0.1', 10000);
$server->on('connect', function ($server, $fd) {
echo 'someone connect us' , $fd, PHP_EOL;
});
$server->on('receive', function($server, $fd, $reactor_id, $data) {
$server->tick(1000, function () use ($server, $fd) {
$server->send($fd, 'hello again');
});
});
$server->on('close', function ($server, $fd) {
echo 'someone close', $fd, PHP_EOL;
});
$server->start();<file_sep>/http-response-test/httpResponseTestServer.php
<?php
$http = new swoole_http_server('0.0.0.0', 8000, SWOOLE_PROCESS);
$http->set([
'http_compression' => true,
'http_gzip_level' => 1,
]);
$http->on('request', function (swoole_http_request $request, swoole_http_response $response) {
try {
if ($request->server['request_uri'] == '/favicon.ico') {
$response->status(404);
$response->end();
            return; // stop handling this request here
}
switch ($request->server['request_uri']) {
case '/' :
$response->header('x-user-token', '<PASSWORD>');
$response->cookie('x-token', '<PASSWORD>', 40, '/', '127.0.0.1', 1, 1);
$response->status(200);
                //$response->gzip(1); // deprecated since 4.1.0; use the http_compression setting instead
$response->end('hello world');
break;
case '/sendfile' :
$response->header('Content-Type', 'application/text');
$response->header('Content-Disposition', 'attachment; filename="test.txt"');
$response->sendfile('./15test.txt', 1, 3); // filename offset length
break;
case '/write' :
$response->write('1');
$response->write(rand(1,2000));
$response->write(md5('s'));
$response->write('hi ha ha ha');
break;
}
} catch (Exception $e) {
$response->end($e->getMessage());
}
});
$http->start();<file_sep>/coroutine-asyc-call-test/concurrentCallClient.php
<?php
echo 'script begin', PHP_EOL;
echo 'asyc call mysql', PHP_EOL;
go(function () {
echo 'no setDefer' , PHP_EOL;
$mysql_config = [
'host' => '127.0.0.1',
'port' => 3306,
'user' => 'root',
'password' => '<PASSWORD>',
'database' => 'test',
];
$beginTime = microtime(1);
$mysql = new Swoole\Coroutine\MySQL();
if ($mysql->connect($mysql_config)) {
$res = $mysql->query("select * from market_client");
//var_export($res);
} else {
echo $mysql->error, PHP_EOL;
}
$mysql->close();
$redis = new Swoole\Coroutine\Redis();
if ($redis->connect('127.0.0.1', 6379)) {
//$redis->set('clientList', json_encode($res));
$list = $redis->get('clientList');
//var_dump($list);
}
$redis->close();
$httpClient = new \Swoole\Coroutine\Http\Client('https://www.baidu.com', 80);
$httpClient->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$httpClient->set([ 'timeout' => 1]);
$httpClient->get('/index.php');
//echo $cli->body;
$httpClient->close();
$endTime = microtime(1);
echo 'no defer timing: ', $endTime - $beginTime, PHP_EOL;
});
go(function () {
echo 'setDefer' , PHP_EOL;
$mysql_config = [
'host' => '127.0.0.1',
'port' => 3306,
'user' => 'root',
'password' => '<PASSWORD>',
'database' => 'test',
];
$beginTime = microtime(1);
$mysql = new Swoole\Coroutine\MySQL();
if ($mysql->connect($mysql_config)) {
$mysql->setDefer();
$mysql->query("select * from market_client");
$res = $mysql->recv();
//var_export($res);
} else {
echo $mysql->error, PHP_EOL;
}
$mysql->close();
$redis = new Swoole\Coroutine\Redis();
if ($redis->connect('127.0.0.1', 6379)) {
//$redis->set('clientList', json_encode($res));
$redis->setDefer();
$redis->get('clientList');
$list = $redis->recv();
//var_dump($list);
}
$redis->close();
$httpClient = new \Swoole\Coroutine\Http\Client('https://www.baidu.com', 80);
$httpClient->setHeaders([
'Host' => "localhost",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$httpClient->set([ 'timeout' => 1]);
$httpClient->setDefer();
$httpClient->get('/index.php');
//echo $cli->body;
$httpClient->recv();
$httpClient->close();
$endTime = microtime(1);
echo 'timing: ', $endTime - $beginTime, PHP_EOL;
});<file_sep>/coroutine-channel-test/channel.php
<?php
$chan = new \Swoole\Coroutine\Channel(3);
\Swoole\Coroutine::create(function () use ($chan) {
for ($i = 0; $i<10; $i++) {
\Swoole\Coroutine::sleep(1);
$chan->push($i);
echo 'push ', 'loop', $i, PHP_EOL;
}
});
\Swoole\Coroutine::create(function () use ($chan) {
for ($i = 0; $i<10; $i++) {
\Swoole\Coroutine::sleep(0.1);
$a = $chan->pop();
echo 'pop ', $a, 'loop', $i, PHP_EOL;
}
});
//swoole_event::wait();
<file_sep>/README.md
# swoole-test
learn how to swoole
<file_sep>/http-request-test/httpRequestTestServer.php
<?php
$http = new swoole_http_server('0.0.0.0', 8000, SWOOLE_PROCESS);
$http->on('request', function (swoole_http_request $request, swoole_http_response $response) {
if ($request->server['request_uri'] == '/favicon.ico') {
$response->status(404);
$response->end();
        return; // stop handling this request here
}
$response->cookie('x-token', '<PASSWORD>', 40, '/', '127.0.0.1', 1, 1);
echo 'server', PHP_EOL;
print_r($request->server);
echo 'header', PHP_EOL;
print_r($request->header);
echo 'get', PHP_EOL;
print_r($request->get);
echo 'post', PHP_EOL;
print_r($request->post);
echo 'cookie', PHP_EOL;
print_r($request->cookie);
echo 'files', PHP_EOL;
print_r($request->files);
echo 'rawContent', PHP_EOL;
print_r($request->rawContent());
echo 'getData', PHP_EOL;
print_r($request->getData());
switch ($request->server['request_uri']) {
default:
if (! empty($request->files)) {
foreach ($request->files as $file) {
if ($file['error'] == 0) {
move_uploaded_file($file['tmp_name'], './' . rand(1,200) .$file['name']);
}
}
}
$response->end('Hello' . $request->server['request_uri']);
}
});
$http->start();<file_sep>/coroutine-asyc-call-test/channelAsycCall.php
<?php
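// HTTP server comparing two ways of fetching two remote pages:
//   /asycChannel - two coroutines fetch www.qq.com and www.163.com concurrently
//                  and pass their bodies back through a channel
//   /seqCall     - the same two requests issued one after the other
// Each handler echoes its elapsed time so the two can be compared.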
$server = new swoole_http_server('0.0.0.0', 8888);
$server->on('request', 'onRequest');
$server->start();
function onRequest(swoole_http_request $request, swoole_http_response $response) {
$uri = $request->server['request_uri'];
switch ($uri) {
case '/favicon.ico':
$response->status(404);
$response->end();
break;
default:
echo $uri, PHP_EOL;
$func = substr($uri, 1);
if (function_exists($func)) {
$func($request, $response);
} else {
$response->end('Hello stranger');
break;
}
}
}
function asycChannel(swoole_http_request $request, swoole_http_response $response) {
$begin = microtime(1);
$chan = new chan(2);
go(function () use ($chan) {
$cli = new Swoole\Coroutine\Http\Client('www.qq.com', 80);
$cli->set(['timeout' => 10]);
$cli->setHeaders([
'Host' => "www.qq.com",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->get('/');
$chan->push(['www.qq.com' => $cli->body]);
});
go(function () use ($chan) {
$cli = new Swoole\Coroutine\Http\Client('www.163.com', 80);
$cli->set(['timeout' => 10]);
$cli->setHeaders([
'Host' => "www.163.com",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->get('/');
$chan->push(['www.163.com' => $cli->body]);
});
$result = [];
for ($i = 0; $i < 2; $i++)
{
$result += $chan->pop();
}
$response->header('Content-Type', 'text/plain');
$response->end(serialize($result));
$end = microtime(1);
    echo 'async: ', ($end - $begin), PHP_EOL;
return;
}
function seqCall(swoole_http_request $request, swoole_http_response $response) {
$begin = microtime(1);
$cli = new Swoole\Coroutine\Http\Client('www.qq.com', 80);
$cli->set(['timeout' => 10]);
$cli->setHeaders([
'Host' => "www.qq.com",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->get('/');
$result[] = $cli->body;
$cli->close();
$cli = new Swoole\Coroutine\Http\Client('www.163.com', 80);
$cli->set(['timeout' => 10]);
$cli->setHeaders([
'Host' => "www.163.com",
"User-Agent" => 'Chrome/49.0.2587.3',
'Accept' => 'text/html,application/xhtml+xml,application/xml',
'Accept-Encoding' => 'gzip',
]);
$cli->get('/');
$result[] = $cli->body;
$cli->close();
$response->header('Content-Type', 'text/plain');
$response->end(serialize($result));
$end = microtime(1);
    echo 'seq: ', ($end - $begin), PHP_EOL;
return;
}<file_sep>/coroutine-mysql-test/mysqlClient.php
<?php
use Swoole\Coroutine\MySQL;
$mysqlConfig = [
'host' => '127.0.0.1',
'port' => 3306,
'user' => 'root',
'password' => '',
'database' => 'test',
'fetch_mode' => true,
];
go(function () use ($mysqlConfig) {
$swoole_mysql = new MySQL();
$rs = $swoole_mysql->connect($mysqlConfig);
if ($rs === false) {
echo $swoole_mysql->connect_error, PHP_EOL;
echo 'Connect failed', PHP_EOL;
return;
}
// $sql = "select name,tel from market_client where name='solar'";
//
// $sql = $swoole_mysql->escape($sql);
// var_dump($sql); // "select name,tel from market_client where name=\'solar\'"
// $res = $swoole_mysql->query($sql);
// var_dump($res);
// $redis = new Swoole\Coroutine\Redis();
// $redis->connect('127.0.0.1', 6379);
// var_dump($res);
// var_dump(serialize($res));
//
// $res = $redis->set('client1', serialize($res));
// var_dump($res);
// var_dump($redis->get('client1'));
// $stmt = $swoole_mysql->prepare("select * from market_client limit 2");
// $stmt->execute();
// while ($ret = $stmt->fetch()) {
// var_dump($ret);
// }
$stmt = $swoole_mysql->prepare('CALL reply(?)');
if ($stmt) {
$stmt->execute(['solar']);
do {
echo '12';
$res = $stmt->fetchAll();
var_dump(json_encode($res));
echo PHP_EOL;
} while ($res = $stmt->nextResult());
var_dump($stmt->affected_rows);
} else {
echo $stmt->error;
}
// $res = $swoole_mysql->query("call reply('solar')");
// var_dump($res);
});
<file_sep>/server/process.php
<?php
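// Broadcast pattern: a user process attached with addProcess() reads messages
// written by the worker (in the receive callback) and sends each one to every
// open connection.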
$server = new \Swoole\Server('127.0.0.1', 10000);
$process = new Swoole\Process(function($process) use ($server) {
while (true) {
$msg = $process->read();
foreach($server->connections as $conn) {
$server->send($conn, $msg);
}
}
});
$server->addProcess($process);
$server->on('receive', function ($serv, $fd, $reactor_id, $data) use ($process) {
    // broadcast the message received from a worker to every connection
$process->write($data);
});
$server->start();
| 271ef0601c25a93fa139fc2267b20b9d771ae5ca | ["Markdown", "PHP"] | 24 | PHP | 5412/swoole-test | 62101aac5499452a6e46951c39d3460c13178c19 | 67e7b62f29848dd1eb12b5f0ce8677d5dbb695da | refs/heads/master |
<file_sep>package com.example.user.todolist;
import java.io.Serializable;
import java.util.Date;
/**
* Created by user on 2017.02.07..
*/
public class Ieraksts implements Serializable{
private String ierakstaTeksts;
private Date pievienosanasDatums;
private int svarigums;
public Ieraksts(String ierakstaTeksts,int svarigums){
this.ierakstaTeksts = ierakstaTeksts;
this.pievienosanasDatums = new Date();
this.svarigums = svarigums;
}
@Override
public String toString(){
return ierakstaTeksts;
}
public Date getDate(){
return pievienosanasDatums;
}
public void setText(String text){
ierakstaTeksts = text;
}
}
<file_sep>package com.example.user.todolist;
import android.content.Context;
import android.widget.Toast;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
/**
* Created by user on 2017.02.14..
*/
public class FailaOperacijas {
Context myContext ;
public FailaOperacijas(Context context){
myContext = context;
}
    //build the file that the to-do list is saved into
    private File getOutputMediaFile() {
        //make a new directory inside the "sdcard" folder
        File dataStorageDir = new File("/sdcard/", "ToDoList_data");
        //if the ToDoList_data folder does not exist
if (!dataStorageDir.exists()) {
//if you cannot make this folder return
if (!dataStorageDir.mkdirs()) {
return null;
}
}
//take the current timeStamp
String timeStamp = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
File mediaFile;
//and make a media file:
mediaFile = new File(dataStorageDir.getPath() + File.separator + "save.dat");
return mediaFile;
}
    public void saglabat(ArrayList data){
        try {
            //serialize the whole list so it can be read back later
            ObjectOutputStream outStream = new ObjectOutputStream(new FileOutputStream(getOutputMediaFile()));
            outStream.writeObject(data);
            outStream.flush();
            outStream.close();
            Toast toast = Toast.makeText(myContext, " saved ", Toast.LENGTH_LONG);
            toast.show();
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
| dc193093a3b575f9aa5ddc3f724666872415a121 | ["Java"] | 2 | Java | Arturs85/ToDoList | 90a96f8c71f82788b789b52728551053bd7dbc4c | eae64bd956389051061dd992b11e58eb9f85417d | refs/heads/master |
<repo_name>nmedina17/oir<file_sep>/R/statsTbl.R
library(here);
# source(
# here("analysis/stats.R")
# ) #...$varData
library(tidymodels) #glance()
library(ggbeeswarm) #quasirandom()
library(ggpmisc) #stat_poly_eq()
library(ggpubr) #ggarrange()
# source(
# here("analysis/disfit.R")
# ) #disfit()
#main
#' Gather results of basic regression, assumption checks, & generalized versions
#'
#' @param ...nestedVarDataTbl
#' @param ...formula
#'
#' @return large tibble
#' @export
#'
#' @examples
getStatsTbl <- function(
#has"varData"col
...nestedVarDataTbl,
#user's
...formula
) {
...nestedVarDataTbl %>%
statFitTbl(
...formula
) %>%
addStatEval(
...formula
) %>%
#nonnormaldistr
addStatFitNon(
...formula
) %>%
addStatEvalNon() %>%
addGraph(
...formula
)
}
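# usage sketch (assumes a tibble carrying a `varData` list-column of per-variable
# data frames, e.g. produced with tidyr::nest(), and a two-sided formula over
# columns of those frames):
# resultsTbl <- nestedTbl %>% getStatsTbl(response ~ treatment)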
#sub-funcs
#' Mutates lm and summary output onto tbl--needs $varData, $statTest
#'
#' @param ...nestedVarDataTbl
#' @param ....formula
#'
#' @return mutated tbl
#' @export
#'
#' @examples
statFitTbl <- function(
...nestedVarDataTbl,
....formula
) {
...nestedVarDataTbl %>%
mutate(
"statTest" =
varData %>%
modify(
~ .x %>%
lm(
formula =
....formula
)
),
"statPrint" =
statTest %>%
modify(
~ .x %>%
tidy() %>%
full_join(
.,
.x %>%
glance()
) %>%
filter(
!is.na(
term
)
)
)
)
}
#' Mutates lm assumptions eval onto tbl--needs $varData, $statTest, $statPrint
#'
#' @param ...statFitTbl
#' @param ....formula
#'
#' @return mutated tbl
#' @export
#'
#' @examples
addStatEval <- function(
...statFitTbl,
....formula
) {
...statFitTbl %>%
mutate(
normalTest =
statTest %>%
modify(
~ .x %>%
residuals() %>%
shapiro_test()
),
#OGdata
varyTest =
ifelse(
#noLmerYet
{
....formula[[3]] %>%
length()
} == 1,
{
varData %>%
modify(
~ .x %>%
levene_test(
formula =
....formula[[2]] %>%
eval() ~
....formula[[3]] %>%
eval() %>%
as_factor()
)
)
},
list(
tibble(
"p" = NA
)
)
),
"isNormal" =
normalTest %>%
modify(
~ if_else(
.x$
p.value >
0.055,
T, F
)
),
# )
# }
"isHomosced" =
varyTest %>%
modify_if(
!is.na(
varyTest
),
~ if_else(
.x$
p >
0.055,
T, F
),
~ NA
)
) %>%
#list2vec
unnest(
c(
isNormal,
isHomosced
)
) %>%
mutate(
"isModelOK" =
if_else(
isNormal &
isHomosced,
T, F
)
) %>%
unnest(
isModelOK
) %>%
# } #de-bug
mutate(
"pval" =
statPrint %>%
modify_if(
{
isModelOK &
!is.na(
isModelOK
)
},
~ .x %>%
filter(
term != "(Intercept)"
) %>%
pull(
p.value
#orOtherLabel
),
.else = ~ NA
),
"isSignif" =
statPrint %>%
modify_if(
{
isModelOK &
!is.na(
isModelOK
)
},
~ if_else(
#mainterm
.x$
p.value[2] <
0.1055,
T, F
),
.else = ~ NA
)
) %>%
unnest(
isSignif
) %>%
mutate(
"R2adj" =
#noLmerYet
ifelse(
{
....formula[[3]] %>%
length()
} == 1,
{
statPrint %>%
modify(
~ .x %>%
filter(
term !=
"(Intercept)"
) %>%
pull(
adj.r.squared
)
)
},
list(
tibble(
"adj.r.squared" = NA
)
)
)
) %>%
unnest(
R2adj
)
}
#nonnormalstats
#nestedmodifyishard
#' Mutates glm summary output onto tbl--needs $varData
#'
#' @param ...statEvalTbl
#' @param ....formula
#'
#' @return mutated tbl
#' @export
#'
#' @examples
addStatFitNon <- function(
...statEvalTbl,
....formula
) {
if_else(
{
....formula[[3]] %>%
length()
} == 1,
{
...statEvalTbl %>%
mutate(
"statTestPois" =
varData %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x %>%
glm(
formula = ....formula,
family = poisson()
) %>%
summary(),
.else = ~ NA
),
"statTestGamma" =
varData %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x %>%
glm(
formula = ....formula,
family = Gamma(
link = "log"
)
) %>%
summary(),
.else = ~ NA
)
)
},
{
...statEvalTbl
}
)
}
#' Mutates key glm values onto tbl--needs $isModelOK, $statTestPois, $statTestGamma
#'
#' @param ...statFitNonTbl
#'
#' @return mutated tbl
#' @export
#'
#' @examples
addStatEvalNon <- function(
...statFitNonTbl
) {
...statFitNonTbl %>%
#getvals
mutate(
poisAIC =
statTestPois %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x$
aic
),
gammaAIC =
statTestGamma %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x$
aic
),
poisPval =
statTestPois %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x$
coefficients[8]
),
gammaPval =
statTestGamma %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x$
coefficients[8]
)
) %>%
unnest(
c(
poisAIC,
gammaAIC,
poisPval,
gammaPval
)
) %>%
#eval
mutate(
"pickAIC" =
pmin(
poisAIC,
gammaAIC
)
) %>%
unnest(
pickAIC
) %>%
mutate(
#morefamilies?
"pickPval" =
ifelse(
pickAIC == poisAIC,
poisPval,
if_else(
pickAIC == gammaAIC,
gammaPval,
9
)
),
"isSignif9" =
if_else(
pickPval <
0.105,
T, F
)
)
}
#' Mutates kruskal_test summary output onto tbl--needs $varData, $isModelOK
#'
#' @param ...statEvalTbl
#' @param ....formula
#'
#' @return mutated tbl
#' @export
#'
#' @examples
addStatFitNonNP <- function(
...statEvalTbl,
....formula
) {
...statEvalTbl %>%
mutate(
statTestNP =
varData %>%
modify_if(
!isModelOK &
!is.na(
isModelOK
),
~ .x %>%
kruskal_test(
....formula
)
) %>%
summary()
)
}
#' Mutates geom_smooth lm plots onto tbl--needs $varData
#'
#' @param ...statEvalNonTbl
#' @param ....formula
#'
#' @return mutated tbl
#' @export
#'
#' @examples
addGraph <- function(
...statEvalNonTbl,
....formula
) {
...statEvalNonTbl %>%
mutate(
graph =
varData %>%
modify(
~ .x %>%
ggplot(
aes(
x = {
....formula[[3]] %>%
eval()
},
y = {
....formula[[2]] %>%
eval()
}
)
) +
geom_quasirandom() +
geom_smooth(
method = "lm"
) +
stat_poly_eq(
formula = y ~ x,
parse = F,
aes(
label = paste(
after_stat(
p.value.label
),
after_stat(
adj.rr.label
)
)
)
) +
labs(
y = deparse(
....formula[[2]]
),
x = deparse(
....formula[[3]]
)
)
)
)
}
<file_sep>/R/poweRlawTest.R
library(poweRlaw)
#replaces_broom::
#bootstrap::
# library(rsample)
#' Gather results of non-linear distribution fits to validate a power law fit
#'
#' @param RealFreq
#'
#' @return "results" data frame
#' @export
#'
#' @examples
CheckPoweRlaw <- function(
RealFreq
) { #freqvector
#Clauset2009,Gillespie2015
#bottleneck==boostrap()
RealFreq <- round(
RealFreq
)
Pl <- displ$new(
RealFreq
)
Pl$setPars(
estimate_pars(
Pl
)
)
#bottleneck==bootstrap()
PlVar <- var(
poweRlaw::bootstrap(
Pl
)$bootstraps$pars
) #variance
#plot(Pl)
PlP <- bootstrap_p(
Pl
)$p #>0.1passes
Exp <- disexp$new(
RealFreq
)
Exp$setPars(
estimate_pars(
Exp
)
)
ExpVar <- var(
poweRlaw::bootstrap(
Exp
)$bootstraps$pars
)
Pois <- dispois$new(
RealFreq
)
Pois$setPars(
estimate_pars(
Pois
)
)
PoisVar <- var(
poweRlaw::bootstrap(
Pois
)$bootstraps$pars
)
Lognorm <- dislnorm$new(
RealFreq
)
Lognorm$setPars(
estimate_pars(
Lognorm
)
)
LognormVar <- poweRlaw::bootstrap(
Lognorm
)$bootstraps
LognormVar1 <- var(
LognormVar$pars1
)
LognormVar2 <- var(
LognormVar$pars2,
na.rm = T
)
#null=bothOK #1sided=arg1==arg2
PlExpP <- compare_distributions(
Pl,
Exp
)$p_one_sided #<0.05=arg1better
PlLognormP <- compare_distributions(
Pl,
Lognorm
)$p_one_sided
PlPoisP <- compare_distributions(
Pl,
Pois
)$p_one_sided
results <- data.frame(
"PlP" = PlP,
"PlExpP" = PlExpP,
"PlLognormP" = PlLognormP,
"PlPoisP" = PlPoisP,
"PlPar" = Pl$pars,
"ExpPar" = Exp$pars,
"PoisPar" = Pois$pars,
"LognormPar1" = Lognorm$pars[1],
"LognormPar2" = Lognorm$pars[2],
"PlVar" = PlVar,
"ExpVar" = ExpVar,
"PoisVar" = PoisVar,
"LognormVar1" = LognormVar1,
"LognormVar2" = LognormVar2
)
return(
results
)
}
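# usage sketch (RealFreq is assumed to be a vector of positive observed counts):
# CheckPoweRlaw(c(1, 1, 2, 3, 5, 8, 13, 21))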
<file_sep>/R/ordTbl.R
#ordinate
library(vegan) #rda(), scores()
library(tidyverse) #%>%, as_tibble()
#' Organize ordination attributes into plot-able tibble
#'
#' @param ...commTbl
#' @param ...metaTbl
#'
#' @return tibble of coordinates and sample attributes
#' @export
#'
#' @examples
getOrdVarTbl <- function(
...commTbl,
...metaTbl
) {
ord = rda(
...commTbl
)
ordTbl = scores(
ord
)$sites %>%
as_tibble()
cbind(
...metaTbl,
ordTbl
) %>%
as_tibble()
}
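# usage sketch (rda() with no constraints performs a PCA of the community matrix):
# ordVarTbl <- getOrdVarTbl(communityCounts, sampleMetadata)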
<file_sep>/R/style.R
library(tidyverse)
#geom_quasirandom()
library(ggbeeswarm)
library(here)
# source(
# here(
# "analysis/statsTbl.R"
# )
# )
library(gginnards)
#append_layers()
library(glue)
style <- theme_bw() +
theme()
#dark?
theme_set(
style
)
dotGraph <- function(
..varData,
# $variable
# $varData
# $varData1
# $pval
# $pickPval
..var,
..x,
..y,
..xlab,
..ylab,
..addGroups = F,
..cleanData = NULL
) {
graph <- ..varData %>%
filter(
variable == ..var
) %>%
select(
"varData"
) %>%
unnest(
everything()
) %>%
ggplot(
aes(
x = {
..x %>%
eval()
},
y = {
..y %>%
eval()
}
)
) +
geom_quasirandom(
color = "black",
shape = 21,
fill = "white",
size = 2
) +
labs(
x = {
..xlab %>%
eval()
},
y = {
..ylab %>%
eval()
}
) +
annotate(
"text",
label = glue(
"P = ",
{
varResult <- ..varData %>%
filter(
variable == ..var
)
checkResult <- varResult %>%
pull(
isModelOK
)
#if_else()2strict
showP <- ifelse(
checkResult &
!is.na(
checkResult
),
{
varResult %>%
pull(
"pval"
)
},
{
varResult %>%
pull(
"pickPval"
)
}
) %>%
last() %>%
as.double()
ifelse(
showP >
0.001,
round(
showP,
3
),
"< 0.001"
)
}
),
x = 1,
y = 1,
hjust = 0
)
#toggles
graph <- if(
is.null(
..cleanData
)
) {
graph
} else {
graph <- graph %>%
#move
append_layers(
geom_quasirandom(
data = ..cleanData,
aes(
y = {
..var %>%
eval()
}
),
color = "gray",
size = 1
),
position = "bottom"
)
}
graph <- if(
..addGroups == T
) {
graph <- graph +
geom_point(
data = {
..varData %>%
filter(
variable == ..var
) %>%
select(
"varData1"
) %>%
unnest(
everything()
)
},
color = "black",
size = 3
)
} else {
graph
}
}
| 237b1b358396e1ec050c84950962554cb7850604 | ["R"] | 4 | R | nmedina17/oir | 864a26f2b3e29a349b3d52af5b713b7545493594 | d4aaa1051696f2d5211f513857ff1544dec5c8da | refs/heads/master |
<repo_name>LukaszCzubernat/praca_magisterska<file_sep>/src/main/java/pl/lc/app/service/UserService.java
package pl.lc.app.service;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import pl.lc.app.model.User;
import pl.lc.app.repository.UserRepository;
import java.util.List;
import java.util.Optional;
@Service
public class UserService {
@Autowired
UserRepository userRepository;
public Optional<User> getUserDetails(String userName) {
return Optional.ofNullable(userRepository.findByUserName(userName));
}
public User save(User user) {
return userRepository.save(user);
}
public Optional<List<User>> getUsers() {
return Optional.ofNullable(userRepository.findAll());
}
}
<file_sep>/src/main/resources/application.properties
server.port=8888
spring.jackson.serialization.WRITE_DATES_AS_TIMESTAMPS=false
mongo.collection.name=notesCollection
spring.data.mongodb.host=localhost
spring.data.mongodb.port=27017
spring.data.mongodb.database=notes
spring.data.mongodb.uri=mongodb://${spring.data.mongodb.host}:${spring.data.mongodb.port}/${spring.data.mongodb.database}
spring.data.mongodb.username=mongo
spring.data.mongodb.password=<PASSWORD>
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url= jdbc:postgresql://localhost:5432/usersDB
spring.datasource.username=postgres
spring.datasource.password=<PASSWORD>
spring.jpa.hibernate.ddl-auto=create-drop
spring.jpa.properties.hibernate.dialect=org.hibernate.dialect.PostgreSQLDialect
spring.jpa.properties.hibernate.temp.use_jdbc_metadata_defaults = false<file_sep>/src/main/java/pl/lc/app/model/User.java
package pl.lc.app.model;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.*;
@Entity(name = "users")
@Data
@NoArgsConstructor
public class User {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
@ApiModelProperty(hidden = true)
private Long id;
@ApiModelProperty(position = 0)
@Column(name = "nameName")
private String userName;
@Column(name = "fullName")
@ApiModelProperty(position = 1)
private String fullName;
}
<file_sep>/src/main/java/pl/lc/app/repository/NoteRepository.java
package pl.lc.app.repository;
import org.springframework.data.mongodb.repository.MongoRepository;
import pl.lc.app.model.Note;
public interface NoteRepository extends MongoRepository<Note, String> {
}
<file_sep>/README.md
Implementation of an application based on Spring Boot, created for the purposes of a master's dissertation.<file_sep>/src/main/java/pl/lc/app/service/ValidationService.java
package pl.lc.app.service;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import pl.lc.app.repository.UserRepository;
import java.util.Optional;
@Component
public class ValidationService {
@Autowired
UserRepository userRepository;
public boolean checkUserExistence(String userName) {
return Optional.ofNullable(userRepository.findByUserName(userName)).isPresent();
}
}
<file_sep>/src/main/java/pl/lc/app/contorller/UserController.java
package pl.lc.app.contorller;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import pl.lc.app.model.User;
import pl.lc.app.service.UserService;
import pl.lc.app.service.ValidationService;
import java.util.List;
import java.util.Optional;
@RestController
@RequestMapping(value = "/users")
public class UserController {
@Autowired
private UserService userService;
@Autowired
private ValidationService validationService;
@RequestMapping(value = "/", method = RequestMethod.GET)
public ResponseEntity getUsers() {
Optional<List<User>> users = userService.getUsers();
if(users.isPresent()) {
return ResponseEntity.status(HttpStatus.OK).body(users.get());
}
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body("There no users yet.");
}
@RequestMapping(value = "/{userName}", method = RequestMethod.GET)
public ResponseEntity getUserById(@PathVariable String userName) {
Optional<User> userDetails = userService.getUserDetails(userName);
if(userDetails.isPresent()) {
return ResponseEntity.status(HttpStatus.OK).body(userDetails.get());
}
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body("There is no such user.");
}
@RequestMapping(value = "/add", method = RequestMethod.PUT)
public ResponseEntity addUser(@RequestBody User user) {
if(!validationService.checkUserExistence(user.getUserName())){
return ResponseEntity.status(HttpStatus.CREATED).body(userService.save(user));
}
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body("There is such user already, pick differnt userName");
}
}
<file_sep>/pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>pl.lc</groupId>
<artifactId>praca-magisterska</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>jar</packaging>
<name>praca-magisterska</name>
	<description>Implementation of master's dissertation</description>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.0.1.RELEASE</version>
<relativePath/>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<java.version>1.8</java.version>
<compiler.plugin.version>3.5.1</compiler.plugin.version>
<project.source.version>1.8</project.source.version>
<project.target.version>1.8</project.target.version>
<mockito-all.version>1.10.19</mockito-all.version>
<junit.version>4.12</junit.version>
<hamcrest-all.version>1.3</hamcrest-all.version>
<jackson-databind.version>2.9.1</jackson-databind.version>
<jacoco-maven-plugin.version>0.7.9</jacoco-maven-plugin.version>
<springfox-swagger.version>2.8.0</springfox-swagger.version>
<lombok.version>1.16.20</lombok.version>
<jackson-datatype.version>2.6.0</jackson-datatype.version>
<postgresql.version>9.4-1206-jdbc42</postgresql.version>
</properties>
<dependencies>
<!-- Spring Framework dependencies -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-mongodb</artifactId>
</dependency>
<!--<dependency>-->
<!--<groupId>org.springframework.boot</groupId>-->
<!--<artifactId>spring-boot-starter-security</artifactId>-->
<!--</dependency>-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-rest</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-test</artifactId>
<scope>test</scope>
</dependency>
<!-- Other dependencies -->
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson-databind.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-jsr310</artifactId>
<version>${jackson-datatype.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox-swagger.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox-swagger.version}</version>
</dependency>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco-maven-plugin.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>${lombok.version}</version>
<scope>provided</scope>
</dependency>
<!-- Test dependencies -->
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>${hamcrest-all.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<version>${mockito-all.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${compiler.plugin.version}</version>
<configuration>
<source>${project.source.version}</source>
<target>${project.target.version}</target>
</configuration>
</plugin>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
<file_sep>/src/main/java/pl/lc/app/model/Note.java
package pl.lc.app.model;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.datatype.jsr310.deser.LocalDateTimeDeserializer;
import com.fasterxml.jackson.datatype.jsr310.ser.LocalDateTimeSerializer;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import java.time.LocalDateTime;
@Data
@NoArgsConstructor
@Document
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Note {
@Id
@ApiModelProperty(hidden = true)
private String id;
@ApiModelProperty(example = "John", position = 1)
private String userName;
@ApiModelProperty(example = "Test message", position = 2)
private String message;
@JsonSerialize(using = LocalDateTimeSerializer.class)
@JsonDeserialize(using = LocalDateTimeDeserializer.class)
@ApiModelProperty(example = "2018-05-01T10:00:00", position = 3)
private LocalDateTime creationDate;
@JsonSerialize(using = LocalDateTimeSerializer.class)
@JsonDeserialize(using = LocalDateTimeDeserializer.class)
@ApiModelProperty(example = "2018-05-01T10:00:00", position = 4)
private LocalDateTime updateDate;
@ApiModelProperty(hidden = true)
private String additionalInfo;
}
|
699176acf1bf4ebd004f8f292ac3c2933a01d297
|
[
"Markdown",
"Java",
"Maven POM",
"INI"
] | 9 |
Java
|
LukaszCzubernat/praca_magisterska
|
9bb8796d457d964abea21e44973b00ff05c242b7
|
b032c5de1c4d7d573f568993358290a451e5d980
|
refs/heads/main
|
<repo_name>OmarBashaIOM/KoboToPDF<file_sep>/main.py
import json
from requests.auth import HTTPBasicAuth
import requests
from io import BytesIO
from PIL import Image
from reportlab.pdfgen import canvas
from reportlab.lib import pagesizes
from reportlab.lib import colors
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.units import cm
import os
import base64
import shutil
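# Flow: pull Kobo submissions as JSON, keep only the records matching the date
# entered below, draw one consent-form/receipt PDF per beneficiary (signatures
# and photos are fetched from the Kobo media endpoint), then zip the output folder.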
logo = '<KEY>'
imgdata = base64.b64decode(logo)
print('getcwd: ', os.getcwd())
print('__file__: ', __file__)
DateString = input("Enter Date Like 2020-09-09 ")
directory = str(os.getcwd()) + '/exported data on - ' + str(DateString)
if not os.path.exists(directory):
os.mkdir(directory)
kuser = 'iomammemergencymande'
kpass = '<PASSWORD>'
#Kobo JSON Link
KoboUrl = 'https://kc.humanitarianresponse.info/api/v1/data/623205?format=jsonp'
response = requests.get(
KoboUrl, auth=HTTPBasicAuth(kuser, kpass)) #Get JSON Data from Kobo Link
JsonReady = str(
response.content
)[11:
-3] #.replace('callback(','').replace(');','').replace('b','') #Remove extra text to make it JSON
ListFormated = json.loads(JsonReady) #Load JSON to LIST
linkbase = 'https://kc.humanitarianresponse.info/media/large?media_file=' + kuser + '%2Fattachments'
linkjoin = '%2F'
counter = 0
while counter < len(ListFormated):
if DateString == ListFormated[counter]['Date'][0:10]:
print(ListFormated[counter]['_uuid'])
print(ListFormated[counter]['formhub/uuid'])
bensn = str(
ListFormated[counter]
['group_mh8nj39/group_za0rs72/Serial_Number_for_Beneficiary']
) #ben SN
print('date ' + ListFormated[counter]['Date'][0:10]) #date
staffname = str(ListFormated[counter]
['group_mh8nj39/Name_of_IOM_Staff']) # staff name
benname = str(
ListFormated[counter]
['group_mh8nj39/group_za0rs72/Name_of_Beneficiary_Voucher_Receipt']
) #ben name
fuuid = str(ListFormated[counter]['formhub/uuid'])
uuid = str(ListFormated[counter]['_uuid'])
bnsig = linkbase + linkjoin + fuuid + linkjoin + uuid + linkjoin + str(
ListFormated[counter]
['group_mh8nj39/group_za0rs72/Beneficiary_Signature_Voucher'])
vpho = linkbase + linkjoin + fuuid + linkjoin + uuid + linkjoin + str(
ListFormated[counter]['group_mh8nj39/group_za0rs72/Voucher_Photo'])
ssig = linkbase + linkjoin + fuuid + linkjoin + uuid + linkjoin + str(
ListFormated[counter]
['group_mh8nj39/group_za0rs72/Signature_of_IOM_Staff'])
try:
idpho = linkbase + linkjoin + fuuid + linkjoin + uuid + linkjoin + str(
ListFormated[counter]['group_mh8nj39/group_za0rs72/ID_Photo'])
        except:
            idpho = None  # no ID photo in this record; a placeholder image is used later
print('Data Imported')
# Doc Details
fileName = directory + '/' + bensn + '.pdf'
# 0) Create document
pdf = canvas.Canvas(fileName, pagesize=pagesizes.A4)
pdf.setTitle('Omar Twait')
pdf.setAuthor('<NAME>')
pdf.setSubject('Omar Twait')
pdf.setKeywords('Omar Twait')
pdf.setFillColor(colors.black)
# Set Font Type and Size
pdfmetrics.registerFont(TTFont('abc', 'arial.ttf'))
pdf.setFont('abc', 12)
# Darw Rectangle
def stafName(c):
c.setFillColorRGB(0.87109375, 0.91796875,
0.96484375) #choose fill colour
c.rect(
1.67 * cm, 27.22 * cm, 17.52 * cm, 1.01 * cm, stroke=0,
fill=1) #draw rectangle
c.setFillColor(colors.black)
c.drawString(1.8 * cm, 27.6 * cm, 'IOM staff member name:')
c.drawString(14 * cm, 27.6 * cm,
' ' + staffname) #########################
stafName(pdf)
# Darw Rectangle
def benName(c):
c.setFillColorRGB(0.87109375, 0.91796875,
0.96484375) #choose fill colour
c.rect(
1.67 * cm, 21.6 * cm, 17.52 * cm, 1.01 * cm, stroke=0,
fill=1) #draw rectangle
c.setFillColor(colors.black)
c.drawString(1.8 * cm, 21.9 * cm,
'Beneficiary name (Voucher Receipt):')
c.drawString(14 * cm, 21.9 * cm,
' ' + benname) ######################
benName(pdf)
def benSuuuuuig(c):
c.drawString(1.8 * cm, 19.5 * cm,
'Beneficiary Signature (Voucher)')
c.drawString(3.6 * cm, 16 * cm, 'ID Photo')
c.drawString(14 * cm, 16 * cm, 'Voucher Photo')
c.drawString(3.6 * cm, 3.5 * cm, 'IOM staff signature')
benSuuuuuig(pdf)
def benSerial(c):
c.setFillColorRGB(0.87109375, 0.91796875,
0.96484375) #choose fill colour
c.rect(
1.67 * cm, 16.6 * cm, 17.52 * cm, 1.01 * cm, stroke=0,
fill=1) #draw rectangle
c.setFillColor(colors.black)
c.drawString(1.8 * cm, 16.9 * cm, 'Beneficiary serial number:')
c.drawString(14 * cm, 16.9 * cm,
' ' + bensn) #################################
benSerial(pdf)
        def consent(c):
            c.setFillColorRGB(0.84765625, 0.84765625,
                              0.84765625)  #choose fill colour
            c.rect(
                1.67 * cm, 23 * cm, 17.52 * cm, 3.86 * cm, stroke=0,
                fill=1)  #draw rectangle
            c.setFillColor(colors.black)
            # keep the consent text in a local list instead of overwriting c.drawString
            consent_lines = [
                'Receipt Voucher I the undersigned certify that I have received three vouchers from IOM',
                'Jordan, each voucher is equal to 70 JOD and can be used for purchase of food and Nonfood',
                'Items (NFIs) from any branch of Sameh Mall inside Jordan. I understand that I can use',
                'each voucher one time only before 10 September 2020. I understand that if the voucher',
                'gets lost or damaged, I will not get another one as a replacement of the lost one.',
                'I have received the voucher by an IOM staff member/ "Tamkeen Fields For Aid" staff member.'
            ]
            text = c.beginText(1.8 * cm, 26 * cm)
            text.setFont("abc", 11.5)
            text.setFillColor(colors.black)
            for line in consent_lines:
                text.textLine(line)
            c.drawText(text)
        consent(pdf)
        # Title text for the form
title = ['Consent Form and Receipt Voucher SDC 3']
text = pdf.beginText(150, 28.88 * cm)
text.setFont("abc", 16)
text.setFillColor(colors.black)
for line in title:
text.textLine(line)
pdf.drawText(text)
#Load Image From URL
resbnsig = requests.get(bnsig, auth=HTTPBasicAuth(kuser, kpass))
bnsigimage = Image.open(BytesIO(resbnsig.content))
new_width = 150
new_height = 90
bnsigimage = bnsigimage.resize((new_width, new_height),
Image.ANTIALIAS)
pdf.drawInlineImage(bnsigimage, 13.5 * cm, 18 * cm)
#Load Image From URL
resvpho = requests.get(vpho, auth=HTTPBasicAuth(kuser, kpass))
vphoimage = Image.open(BytesIO(resvpho.content))
new_width = 150
new_height = 150
vphoimage = vphoimage.resize((new_width, new_height), Image.ANTIALIAS)
pdf.drawInlineImage(vphoimage,14 * cm, 10 * cm)
#Load Image From URL
resssig = requests.get(ssig, auth=HTTPBasicAuth(kuser, kpass))
ssigimage = Image.open(BytesIO(resssig.content))
new_width = 150
new_height = 150
ssigimage = ssigimage.resize((new_width, new_height), Image.ANTIALIAS)
pdf.drawInlineImage(ssigimage, 13.5 * cm, 2 * cm)
#Load Image From URL
try:
residpho = requests.get(idpho, auth=HTTPBasicAuth(kuser, kpass))
idphoimage = Image.open(BytesIO(residpho.content))
except:
idphoimage = Image.open("nullph.png")
new_width = 150
new_height = 150
idphoimage = idphoimage.resize((new_width, new_height),
Image.ANTIALIAS)
pdf.drawInlineImage(idphoimage, 3 * cm, 10 * cm)
#Load Image From URL
#residpho = requests.get(idpho, auth=HTTPBasicAuth(kuser, kpass))
idphoimag = Image.open(BytesIO(imgdata))
new_width = 97
new_height = 49
l = idphoimag.resize((new_width, new_height), Image.ANTIALIAS)
pdf.drawInlineImage(l, 17.5 * cm, 28.22 * cm )
pdf.save()
print('PDF Created')
counter += 1
print('Task Completed Successfully')
shutil.make_archive(str(directory) + '-archive', 'zip', directory)
print('Zip File Created')
#upload to Google Drive
#Cleaning and removing Temp files and folders
shutil.rmtree(directory)
print('Temp Folder Deleted')
#removefile = pathlib.Path(str(directory)+'-archive.zip')
#removefile.unlink()
| f17879ef88ae14411c3ba6a9ec8cf4de89ee6b44 | ["Python"] | 1 | Python | OmarBashaIOM/KoboToPDF | b7896585cd4f494a2becff014bc635f0513c839c | 883bc768f24dc0a41a2cc4ef764a7483c01565eb | refs/heads/master |
<repo_name>akifquddus/Wordpress-REST-API<file_sep>/README.md
# Wordpress-REST-API
Simplified and Secure REST API to Create a Wordpress Post with Featured Image
## Verify User Wordpress URL and Login Info
```php
$fields_string = "";
$fields = array(
'wprequest' => true,
'user' => 'admin',
'pass' => '<PASSWORD>',
'type' => 'verify',
);
foreach($fields as $key=>$value) { $fields_string .= $key.'='.$value.'&'; }
$fields_string = rtrim($fields_string, '&');
$ch = curl_init();
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
curl_setopt($ch, CURLOPT_URL, 'http://example.com' . $this->path);
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch,CURLOPT_POST, count($fields));
curl_setopt($ch,CURLOPT_POSTFIELDS, $fields_string);
$result = json_decode(curl_exec($ch));
```
## Result
```json
{
    "status": true,
    "message": "Account Successfully Verified"
}
```
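## Create a Post
A sketch of the `createpost` call that `api.php` also accepts (the endpoint path and credentials below are placeholders; the password field is anonymised in the plugin source and assumed to be `pass`):
```php
$fields = array(
    'wprequest'     => true,
    'user'          => 'admin',
    'pass'          => 'your-password',
    'type'          => 'createpost',
    'post_title'    => 'Hello from the REST API',
    'post_content'  => 'Post body goes here.',
    'post_status'   => 'publish',
    'post_category' => serialize(array(1)),               // category IDs, serialized
    'image'         => 'http://example.com/featured.jpg', // or '#' for no featured image
);

$ch = curl_init();
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
curl_setopt($ch, CURLOPT_URL, 'http://example.com/wp-content/plugins/wp-rest-api/api.php'); // adjust to wherever api.php lives
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_POST, true);
curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query($fields));
$result = json_decode(curl_exec($ch));
```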
<file_sep>/api.php
<?php
/*
Plugin Name: WP REST API
Plugin URI: http://akifquddus.com
Description: WP REST API Plugin
Version: 1.1
Author: <NAME>
Author URI: http://akifquddus.com
License: GPL2
License URI: https://www.gnu.org/licenses/gpl-2.0.html
Text Domain: wprestapi
Domain Path: /languages
*/
if (!empty($_POST['wprequest'])) {
include '../../../wp-includes/wp-db.php';
include '../../../wp-config.php';
include '../../../wp-load.php';
if ($_POST['type'] == 'verify') {
$user = htmlspecialchars($_POST['user'],ENT_QUOTES);
$pass = $_POST['pass'];
if (verify($user, $pass)) {
echo json_encode(array(
'status' => true,
'message' => "Account Successfully Verified",
));
} else {
echo json_encode(array(
'status' => false,
'message' => "Authentication Error Occurred",
));
}
} else if ($_POST['type'] == 'createpost') {
$user = htmlspecialchars($_POST['user'],ENT_QUOTES);
$pass = $_POST['<PASSWORD>'];
$userinfo = (wp_authenticate($user, $pass));
if (isset($userinfo->id)) {
$post = array(
'post_title' => $_POST['post_title'],
'post_content' => $_POST['post_content'],
'post_status' => $_POST['post_status'],
'post_category' => unserialize($_POST['post_category']),
);
$post = (wp_insert_post($post, true));
if ($_POST['image'] != '#') {
Generate_Featured_Image($_POST['image'], $post);
}
echo json_encode(array('status' => true, 'post' => get_post($post)));
} else {
echo json_encode(array('status' => false, 'message' => 'Authentication Error Occurred'));
}
}
}
function verify($User, $Pass) {
$user = $User;
$pass = $Pass;
$userinfo = (wp_authenticate($user, $pass));
if (is_wp_error($userinfo)) {
return false;
} else {
return true;
}
}
/**
* Takes Image URL and Post ID
* Upload Image to WP Directory and Database
* Add Image as a Featured to the Given Post ID
*/
function Generate_Featured_Image( $image_url, $post_id ){
$upload_dir = wp_upload_dir();
$image_data = file_get_contents($image_url);
$filename = basename($image_url);
if(wp_mkdir_p($upload_dir['path']))
$file = $upload_dir['path'] . '/' . $filename;
else
$file = $upload_dir['basedir'] . '/' . $filename;
file_put_contents($file, $image_data);
$wp_filetype = wp_check_filetype($filename, null );
$attachment = array(
'post_mime_type' => $wp_filetype['type'],
'post_title' => sanitize_file_name($filename),
'post_content' => '',
'post_status' => 'inherit'
);
$attach_id = wp_insert_attachment( $attachment, $file, $post_id );
require_once(ABSPATH . 'wp-admin/includes/image.php');
$attach_data = wp_generate_attachment_metadata( $attach_id, $file );
$res1= wp_update_attachment_metadata( $attach_id, $attach_data );
$res2= set_post_thumbnail( $post_id, $attach_id );
}
| f15ab4c559d8c517b325657684c6f66713cf36bf | ["Markdown", "PHP"] | 2 | Markdown | akifquddus/Wordpress-REST-API | a58459790473e867bd23413195b60931c647a599 | d477cb0efb7714e4fc01b15ac733f2567f82be58 | refs/heads/master |
<file_sep>var util = require('util');
var moment = require("moment");
var mysql = require('mysql');
var config = require('./config');
function handleDisconnect() {
var db = config.database;
connection = mysql.createConnection({
host : db.host,
database : db.database,
user : db.user,
password : <PASSWORD>
});
connection.connect(function(err) {
if(err) {
console.log('error when connecting to db:', err);
      setTimeout(handleDisconnect, 2000); //retry when the connection fails
}
});
  connection.on('error', function(err) { //handle connection errors
console.log('db error', err);
if(err.code === 'PROTOCOL_CONNECTION_LOST') {
      handleDisconnect(); //reconnect
} else {
throw err;
}
});
}
handleDisconnect();
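// Chat commands handled below:
//   [start] <task>  - start tracking a task
//   [end] <task>    - finish a named task
//   [end2]          - finish whatever task is currently in progress
//   [sum]           - list today's tasks and total working time
//   [remove] <task> - delete an unfinished task
//   [month] <M>     - list tasks and total time for month M of this year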
module.exports = function(robot) {
//connection.connect();
robot.hear(/hello/i, function(msg) {
msg.send('World!');
});
robot.hear(/\[test\] (.*)/i,function(msg){
item = msg.match[0];
msg.send(msg.match[1]);
});
robot.hear(/\[start\] (.*)/i, function(msg){
var user = msg.message.user.name;
connection.query('insert into attend (user, task, end) values ("'+ user +'","WIP:' + msg.match[1] +'", CURRENT_TIMESTAMP);', function(err, result) {
if (err) {
msg.send(err);
}
});
msg.send(user + "ใ" +msg.match[1]+"ใๅงใใพใใ๏ผ");
});
robot.hear(/\[end\] (.*)/i, function(msg){
//connection.connect();
var user = msg.message.user.name;
connection.query('select * from attend WHERE (DATE(start) = CURDATE() OR DATE(start) = DATE_SUB(CURDATE(),interval 1 day)) AND user = "'+user+'" AND task = "WIP:' +msg.match[1]+'";', function(err, result) {
if (err) {
msg.send(err);
}
if (util.inspect(result[0]) === "undefined") {
msg.send(msg.match[1]+"ใฏ้ๅงใใใฆใชใใ!");
}else{
connection.query('update attend set task="'+msg.match[1]+'", end = CURRENT_TIMESTAMP WHERE (DATE(start) = CURDATE() OR DATE(start) = DATE_SUB(CURDATE(),interval 1 day)) AND user = "'+user+'" AND task = "WIP:' +msg.match[1]+'";',function(err, result) {
if (err) {
msg.send("ใชใใ");
}
msg.send(msg.match[1]+"ใ็ตใใใพใใ๏ผ");
});
}
});
});
robot.hear(/\[end2\]/i, function(msg){
var user = msg.message.user.name;
connection.query('select * from attend WHERE (DATE(start) = CURDATE() OR DATE(start) = DATE_SUB(CURDATE(),interval 1 day)) AND user = "'+user+'" AND task LIKE "WIP:%";', function(err, result) {
if (err) {
msg.send(err);
}
if (util.inspect(result[0]) === "undefined") {
msg.send("ใฟในใฏใฏ้ๅงใใใฆใชใใ!");
}else{
connection.query('update attend set task="'+msg.match[1]+'", end = CURRENT_TIMESTAMP WHERE (DATE(start) = CURDATE() OR DATE(start) = DATE_SUB(CURDATE(),interval 1 day)) AND user = "'+user+'" AND task = "WIP:' +msg.match[1]+'";',function(err, result) {
if (err) {
msg.send("ใชใใ");
}
//msg.send(result[0].task.replace(/WIP:/g,"")+"ใ็ตใใใพใใ๏ผ");
});
}
msg.send(result[0].task.replace(/WIP:/g,""));
});
});
robot.hear(/\[sum\]/i, function(msg){
connection.query('select task, timediff(end,start) as time from attend where (DATE(start) = CURDATE() OR (DATE(end) = CURDATE() AND DATE(start) = DATE_SUB(CURDATE(),interval 1 day))) AND user = "'+msg.message.user.name+'";', function(err, result) {
if (err) {
msg.send(err);
}
for(var i in result){
var mTask = moment(result[i].time, "HH:mm:ss");
if(mTask.hours()==0){
          var formatTask = mTask.format('m[m] s[s]');
        }else{
          var formatTask = mTask.format('H[h] m[m] s[s]');
}
msg.send(result[i].task+" "+formatTask);
}
connection.query('select sum(time_to_sec(timediff(end,start))) as sumTime from attend where (DATE(start) = CURDATE() OR (DATE(end) = CURDATE() AND DATE(start) = DATE_SUB(CURDATE(),interval 1 day))) AND user = "'+msg.message.user.name+'";', function(err, sum) {
if (err) {
msg.send(err);
}
msg.send("ไปๆฅใฎๅคๅๆ้ใฏ"+toHms(sum[0].sumTime)+"ใ ใ๏ผ");
});
});
});
function toHms(t) {
var hms = "";
var h = t / 3600 | 0;
var m = t % 3600 / 60 | 0;
var s = t % 60;
    if (h != 0) {
      hms = h + "h " + padZero(m) + "m " + padZero(s) + "s";
    } else if (m != 0) {
      hms = m + "m " + padZero(s) + "s";
    } else {
      hms = s + "s";
    }
return hms;
function padZero(v) {
if (v < 10) {
return "0" + v;
} else {
return v;
}
}
}
robot.hear(/\[remove\] (.*)/i, function(msg){
var user = msg.message.user.name;
connection.query('delete from attend WHERE (DATE(start) = CURDATE() OR DATE(start) = DATE_SUB(CURDATE(),interval 1 day)) AND user = "'+user+'" AND task = "WIP:' +msg.match[1]+'";', function(err, result) {
if (err) {
msg.send(err);
}
if(result.affectedRows === 1){
msg.send(msg.match[1]+"ใๅใๆถใใพใใ๏ผ");
}else{
msg.send(msg.match[1]+"ใฏ้ๅงใใใฆใชใใ!");
}
});
});
  robot.hear(/break/i, function(msg) {
    msg.send('Good work! Take a good rest.');
});
robot.hear(/\[month\] (.*)/i, function(msg){
var user = msg.message.user.name;
var date = new Date();
var searchDay = msg.match[1];
if(searchDay.length == 1){
searchDay = '0' + searchDay;
}
var searchDate = date.getFullYear() + searchDay;
connection.query("select task, timediff(end,start) as time from attend where user = '"+ user +"' and DATE_FORMAT(start, '%Y%m') = "+ searchDate +";", function(err, result) {
if (err) {
msg.send(err);
}
for(var i in result){
var mTask = moment(result[i].time, "HH:mm:ss");
if(mTask.hours()==0){
          var formatTask = mTask.format('m[m] s[s]');
        }else{
          var formatTask = mTask.format('H[h] m[m] s[s]');
}
msg.send(result[i].task+" "+formatTask);
}
});
connection.query("select sum(time_to_sec(timediff(end,start))) as sumTime from attend where user = '"+ user +"' and DATE_FORMAT(start, '%Y%m') = " + searchDate + ";", function(err, sum) {
if (err) {
msg.send(err);
}
msg.send(msg.match[1]+"ๆใฎๅคๅๆ้ใฏ"+toHms(sum[0].sumTime)+"ใ ใ๏ผ");
});
});
}
| edf143011c34cfa9692f4674a6deb2d859e3fc4d | ["JavaScript"] | 1 | JavaScript | TheDesignium/aoi_aoi_chan | 159823a413be13d4e7d424693d35112fdfc55bfd | f512d137224ec4ce8f9f52dbda268a598456ee9b | refs/heads/master |
<repo_name>giorgioscappaticcio/skinandinkadmin_new<file_sep>/app/scripts/controllers/main.js
'use strict';
/**
* @ngdoc function
* @name adminApp.controller:MainCtrl
* @description
* # MainCtrl
* Controller of the adminApp
*/
angular.module('adminApp')
.controller('MainCtrl', function ($scope, $window) {
$scope.popupHeight = $window.innerHeight + 'px';
$scope.popupWidth = $window.innerWidth + 'px';
$scope.isVisible = false;
$scope.showFullPage = function(color) {
$scope.fullpagestyle = {
'background-color' : color
}
$scope.isVisible = !$scope.isVisible;
}
$scope.backHome = function () {
$scope.isVisible = !$scope.isVisible;
$scope.fb_integration = false;
$scope.shownews = false;
}
});
<file_sep>/app/scripts/directives/fullpage/news/news.js
'use strict';
/**
* @ngdoc directive
* @name adminApp.directive:fullpage/news/news
* @description
* # fullpage/news/news
*/
angular.module('adminApp')
.directive('news', function (CommonMain, $timeout) {
return {
templateUrl: '/views/fullpage/news/news.html',
restrict: 'AE',
link: function postLink(scope, element, attrs) {
scope.newsData = {};
scope.viewGallery = false;
scope.showLoader = false;
scope.alertMsg = false;
scope.viewForm = true;
scope.mainTitle = '<i class="fa fa-plus-circle"></i> Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Edit News in the list below';
scope.update_news = function(){
CommonMain.get_news().then( function(d) {
// success
if(d){
scope.newsObj = d;
console.log(scope.newsObj);
scope.showLoader = false;
scope.messageAdd = '<i class="fa fa-info-circle"></i> Edit News in the list below';
}
}, function(d) {
// request rejected (error)
          scope.newsObj = {};
});
return;
}
scope.update_gallery = function(){
CommonMain.get_gallery().then( function(d) {
// success
if(d){
scope.galleryObj = d;
console.log(scope.galleryObj);
scope.showLoader = false;
scope.messageAdd = '<i class="fa fa-info-circle"></i> Select a picture from the list below';
scope.mainTitle = '<i class="fa fa-file-image-o"></i> Manage Gallery'
scope.viewGallery = !scope.viewGallery;
}
}, function(d) {
// request rejected (error)
          scope.newsObj = {};
});
return;
}
scope.display_gallery = function(){
scope.viewForm = !scope.viewForm;
scope.showLoader = true;
scope.update_gallery();
}
scope.selectThumb = function(index, link){
scope.selectedIndex = null;
scope.selectedIndex = index;
scope.newsData.picUrl = link;
$timeout(function(){scope.backToAdd();}, 500);
}
scope.delete_picture = function(id, url){
scope.alertMsg = true;
scope.viewGallery = !scope.viewGallery;
scope.pictureToDelete = id;
scope.pictureUrl = url;
scope.messageAdd = '<i class="fa fa-exclamation-triangle"></i> Warning: Delete confirmation.'
}
scope.confirm_delete_picture = function(){
CommonMain.delete_picture(scope.pictureToDelete).then( function(d) {
// success
if(d){
scope.deleteConfirm = d;
console.log(scope.deleteConfirm);
scope.backToGallery();
}
}, function(d) {
// request rejected (error)
          scope.newsObj = {};
});
return;
}
scope.backToGallery =function(){
scope.mainTitle = '<i class="fa fa-file-image-o"></i> Manage Gallery';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Select a picture from the list below';
scope.alertMsg = false;
scope.update_gallery();
}
scope.backToAdd =function(){
scope.mainTitle = '<i class="fa fa-plus-circle"></i> Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Edit News in the list below';
scope.viewForm = !scope.viewForm;
scope.viewGallery = !scope.viewGallery;
}
scope.morphyBtn();
}
};
});
<file_sep>/app/scripts/directives/fullpage/fullpage.js
'use strict';
/**
* @ngdoc directive
* @name adminApp.directive:fullpage/fullpage
* @description
* # fullpage/fullpage
*/
angular.module('adminApp')
.directive('fullpage', function (CommonMain, $timeout) {
return {
templateUrl: '/views/fullpage/fullpage.html',
restrict: 'AE',
link: function postLink(scope, element, attrs) {
scope.$watch('fb_integration',function(oldvalue, newvalue){
if (oldvalue === newvalue){
return;
} else{
scope.update_general_content();
scope.update_tattoo_content();
}
});
scope.$watch('shownews',function(oldvalue, newvalue){
if (oldvalue === newvalue){
return;
} else{
scope.update_news();
//scope.update_tattoo_content();
}
});
// Contribute by Codrops -->
scope.morphyBtn = function(){
var docElem = window.document.documentElement, didScroll, scrollPosition;
// trick to prevent scrolling when opening/closing button
function noScrollFn() {
window.scrollTo( scrollPosition ? scrollPosition.x : 0, scrollPosition ? scrollPosition.y : 0 );
}
function noScroll() {
window.removeEventListener( 'scroll', scrollHandler );
window.addEventListener( 'scroll', noScrollFn );
}
function scrollFn() {
window.addEventListener( 'scroll', scrollHandler );
}
function canScroll() {
window.removeEventListener( 'scroll', noScrollFn );
scrollFn();
}
function scrollHandler() {
if( !didScroll ) {
didScroll = true;
setTimeout( function() { scrollPage(); }, 60 );
}
};
function scrollPage() {
scrollPosition = { x : window.pageXOffset || docElem.scrollLeft, y : window.pageYOffset || docElem.scrollTop };
didScroll = false;
};
scrollFn();
[].slice.call( document.querySelectorAll( '.morph-button' ) ).forEach( function( bttn ) {
new UIMorphingButton( bttn, {
closeEl : '.icon-close',
onBeforeOpen : function() {
// don't allow to scroll
noScroll();
},
onAfterOpen : function() {
// can scroll again
canScroll();
},
onBeforeClose : function() {
// don't allow to scroll
noScroll();
},
onAfterClose : function() {
// can scroll again
canScroll();
}
} );
} );
// for demo purposes only
[].slice.call( document.querySelectorAll( 'form button' ) ).forEach( function( bttn ) {
bttn.addEventListener( 'click', function( ev ) { ev.preventDefault(); } );
} );
}
}
};
});
<file_sep>/app/scripts/directives/fullpage/news.js
'use strict';
/**
* @ngdoc directive
* @name adminApp.directive:fullpage/news/news
* @description
* # fullpage/news/news
*/
angular.module('adminApp')
.directive('news', function (CommonMain, $timeout) {
return {
templateUrl: '/views/fullpage/news.html',
restrict: 'AE',
link: function postLink(scope, element, attrs) {
scope.startForm = function(){
scope.newsData = {};
scope.selectedIndex = null;
scope.viewGallery = false;
scope.showLoader = false;
scope.alertMsg = false;
scope.viewForm = true;
scope.showConfirm = false;
scope.showeditNews = false;
scope.showUploader = false;
scope.mainTitle = '<i class="fa fa-plus-circle"></i> Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Fill the form below to add News';
scope.messageNews = '<i class="fa fa-chevron-circle-down"></i> Edit News from the list below';
scope.newsPublished = true;
scope.newsPublishedMsg = '<span class="green"><i class="fa fa-eye"></i> Publish</span>';
scope.newsData.news_published = 1;
}
scope.startForm();
scope.update_news = function(){
CommonMain.get_news().then( function(d) {
// success
if(d){
scope.newsObj = d;
console.log(scope.newsObj);
scope.startForm();
scope.showLoader = false;
scope.messageAdd = '<i class="fa fa-info-circle"></i> Fill the form below to add News';
}
}, function(d) {
// request rejected (error)
scope.newsObj = {};
});
return;
}
scope.confirmAdd = function(){
scope.showConfirm = !scope.showConfirm;
scope.viewForm = !scope.viewForm;
scope.mainTitle = '<i class="fa fa-exclamation-triangle"></i> Confirm Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Click Publish to change publishing option';
}
scope.AddNews = function(){
CommonMain.insert_news(scope.newsData).then( function(d) {
// success
if(d){
scope.insertConfirm = d;
console.log(scope.insertConfirm);
if (d.success){
scope.newsData = {};
scope.backToAdd();
scope.messageAdd = '<span class="green"><i class="fa fa-check-square"></i> ' + d.msg + '</span>';
$timeout(function(){
scope.messageAdd = '<i class="fa fa-info-circle"></i> Fill the form below to add News';
}, 2000);
} else{
scope.messageAdd = '<i class="fa fa-info-circle"></i> ' + d.msg +'. Go Back to add info!';
}
}
}, function(d) {
// request rejected (error)
scope.insertConfirm = {};
});
return;
}
scope.publishNews = function(){
if (!scope.newsPublished){
scope.newsPublished = !scope.newsPublished;
scope.newsPublishedMsg = '<span class="green"><i class="fa fa-eye"></i> Publish</span>';
scope.newsData.news_published = 1;
} else {
scope.newsPublishedMsg = '<i class="fa fa-eye-slash"></i> Unpublish';
scope.newsPublished = !scope.newsPublished;
scope.newsData.news_published = 0;
}
}
scope.editNews = function(id){
scope.showeditNews = !scope.showeditNews;
scope.newsData = {};
scope.newsID = id;
scope.newsData.news_id = id;
scope.newsData.news_picture = null;
scope.message = '<i class="fa fa-chevron-circle-down"></i> Fill the form below to update the Tattoist';
}
scope.confirm_update = function(pub){
scope.newsID = null;
scope.showConfirm = true;
scope.newsData.news_published = pub;
scope.publishNews();
}
scope.UpdateNews = function(){
CommonMain.update_news(scope.newsData).then( function(d) {
// success
if(d){
scope.updateConfirm = d;
console.log(scope.insertConfirm);
if (d.success){
//scope.newsData = {};
scope.backToList();
scope.messageNews = '<span class="green"><i class="fa fa-check-square"></i> ' + d.msg + '</span>';
$timeout(function(){
scope.messageNews = '<i class="fa fa-chevron-circle-down"></i> Edit News from the list below';
}, 2000);
} else{
scope.messageNews = '<i class="fa fa-info-circle"></i> ' + d.msg +'. Go Back to add info!';
}
}
}, function(d) {
// request rejected (error)
scope.updateConfirm = {};
});
return;
}
scope.update_gallery = function(){
CommonMain.get_gallery().then( function(d) {
// success
if(d){
scope.galleryObj = d;
console.log(scope.galleryObj);
scope.showLoader = false;
scope.messageAdd = '<i class="fa fa-info-circle"></i> Select a picture from the list below';
scope.mainTitle = '<i class="fa fa-file-image-o"></i> Manage Gallery'
scope.viewGallery = !scope.viewGallery;
}
}, function(d) {
// request rejected (error)
scope.newsObj = {};
});
return;
}
scope.refresh_gallery = function(){
CommonMain.get_gallery().then( function(d) {
// success
if(d){
scope.galleryObj = d;
console.log(scope.galleryObj);
}
}, function(d) {
// request rejected (error)
scope.galleryObj = {};
});
return;
}
scope.display_gallery = function(){
scope.viewForm = !scope.viewForm;
scope.newsID = null;
scope.showLoader = true;
scope.update_gallery();
}
scope.selectThumb = function(index, link, backToEdit){
scope.selectedIndex = null;
scope.selectedIndex = index;
scope.newsData.news_picture = link;
$timeout(function(){
backToEdit ? scope.backToEdit() : scope.backToAdd();
}, 500);
}
scope.delete_picture = function(id, url){
scope.alertMsg = true;
scope.viewGallery = !scope.viewGallery;
scope.pictureToDelete = id;
scope.pictureUrl = url;
scope.messageAdd = '<i class="fa fa-exclamation-triangle"></i> Warning: Delete confirmation.'
}
scope.confirm_delete_picture = function(){
CommonMain.delete_picture(scope.pictureToDelete).then( function(d) {
// success
if(d){
scope.deleteConfirm = d;
console.log(scope.deleteConfirm);
scope.backToGallery();
}
}, function(d) {
// request rejected (error)
          scope.newsObj = {};
});
return;
}
scope.showFileUploader = function(){
scope.showUploader = !scope.showUploader;
}
scope.backToGallery =function(){
scope.mainTitle = '<i class="fa fa-file-image-o"></i> Manage Gallery';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Select a picture from the list below';
scope.alertMsg = false;
scope.update_gallery();
}
scope.backToAdd =function(){
if (!scope.showConfirm){
scope.mainTitle = '<i class="fa fa-plus-circle"></i> Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Edit News in the list below';
scope.viewForm = !scope.viewForm;
scope.viewGallery = !scope.viewGallery;
} else {
scope.mainTitle = '<i class="fa fa-plus-circle"></i> Add News';
scope.messageAdd = '<i class="fa fa-info-circle"></i> Edit News in the list below';
scope.showConfirm = !scope.showConfirm;
scope.viewForm = !scope.viewForm;
}
}
scope.backToList = function(){
scope.showeditNews = !scope.showeditNews;
scope.newsID = null;
scope.showConfirm = false;
scope.update_news();
}
scope.backToEdit = function(){
scope.newsID = scope.newsData.news_id;
scope.showConfirm = false;
scope.viewGallery = !scope.viewGallery;
}
scope.morphyBtn();
}
};
});
<file_sep>/app/scripts/directives/uploadfile.js
'use strict';
/**
* @ngdoc directive
* @name adminApp.directive:uploadfile
* @description
* # uploadfile
*/
angular.module('adminApp')
.directive('uploadfile', function ($sce) {
return {
templateUrl: 'views/uploadfile.html',
restrict: 'AE',
link: function postLink(scope, element, attrs) {
scope.uploadData = {};
scope.uploadData.section = attrs.section;
console.log(scope.uploadData.section)
scope.actionUrl = $sce.trustAsResourceUrl('http://giorgioscappaticcio.co.uk/skin_ink/admin/queries/upload_file.php');
var options = {
data: { section: scope.uploadData.section},
//target: '#output', // target element(s) to be updated with server response
beforeSubmit: beforeSubmit, // pre-submit callback
success: afterSuccess, // post-submit callback
uploadProgress: OnProgress, //upload progress callback
resetForm: true // reset the form after successful submit
};
//function after succesful file upload (when server response)
function afterSuccess(data)
{
var json = JSON.parse(data);
$('#submit-btn').show(); //show submit button again
$('#loading-img').hide(); //hide loading indicator
$('#progressbox').delay( 1000 ).fadeOut(function(){
if(!json.success){
$('#output').text('Error: ' + json.msg);
scope.refresh_gallery();
} else {
$('#output').text('Success: ' + json.msg);
scope.refresh_gallery();
}
}); //hide progress bar
}
//function to check file size before uploading.
function beforeSubmit(){
//check whether browser fully supports all File API
if (window.File && window.FileReader && window.FileList && window.Blob)
{
if( !$('#FileInput').val()) //check empty input filed
{
$("#output").html("Are you kidding me?");
return false
}
var fsize = $('#FileInput')[0].files[0].size; //get file size
var ftype = $('#FileInput')[0].files[0].type; // get file type
//allow file types
switch(ftype)
{
case 'image/png':
case 'image/gif':
case 'image/jpeg':
case 'image/pjpeg':
case 'text/plain':
case 'text/html':
case 'application/x-zip-compressed':
case 'application/pdf':
case 'application/msword':
case 'application/vnd.ms-excel':
case 'video/mp4':
break;
default:
$("#output").html("<b>"+ftype+"</b> Unsupported file type!");
return false
}
//Allowed file size is less than 5 MB (5242880 bytes)
if(fsize>5242880)
{
$("#output").html("<b>"+bytesToSize(fsize) +"</b> Too big file! <br />File is too big, it should be less than 5 MB.");
return false
}
$('#submit-btn').hide(); //hide submit button
$('#loading-img').show(); //show loading indicator
$("#output").html("");
}
else
{
//Output error to older unsupported browsers that doesn't support HTML5 File API
$("#output").html("Please upgrade your browser, because your current browser lacks some new features we need!");
return false;
}
}
//progress bar function
function OnProgress(event, position, total, percentComplete)
{
//Progress bar
$('#progressbox').show();
$('#progressbar').width(percentComplete + '%') //update progressbar percent complete
$('#statustxt').html(percentComplete + '%'); //update status text
if(percentComplete>50)
{
$('#statustxt').css('color','#000'); //change status text to black after 50%
}
}
//function to format bites bit.ly/19yoIPO
function bytesToSize(bytes) {
var sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'];
if (bytes == 0) return '0 Bytes';
var i = parseInt(Math.floor(Math.log(bytes) / Math.log(1024)));
return (bytes / Math.pow(1024, i)).toFixed(2) + ' ' + sizes[i];
}
$('#MyUploadForm').submit(function() {
$(this).ajaxSubmit(options);
return false;
});
}
};
});
|
4e0106f92189c99da89202eadf91fdb288a913c9
|
[
"JavaScript"
] | 5 |
JavaScript
|
giorgioscappaticcio/skinandinkadmin_new
|
5fa19513fb60cad79b707520e1f881966e33d06e
|
e9bad1e5a9e2dd100e12db0f5fccc85e31eca169
|
refs/heads/master
|
<file_sep># gm-qiita-infinite-more
> A GreaseMonkey script that adds automatic "load more" (infinite scroll) to the Qiita feed.
## Install
[Install](https://raw.githubusercontent.com/akameco/gm-qiita-infinite-more/master/gm-qiita-infinite-more.user.js)
## Demo

## License
MIT ยฉ [akameco](http://akameco.github.io)
<file_sep>// ==UserScript==
// @name gm-qiita-infinite-more
// @namespace gm-qiita-infinite-more
// @include http://qiita.com/*
// @version 1
// @grant none
// ==/UserScript==
/* global $ */
function infinitelyMore() {
const btn = $('.more-button').get(0);
if (!btn) {
return;
}
const wHeight = window.innerHeight;
const wScroll = window.pageYOffset || document.documentElement.scrollTop;
const btnOffset = $('.more-button').offset().top;
if (wScroll > (btnOffset - wHeight)) {
btn.click();
}
}
document.addEventListener('DOMContentLoaded', () => {
window.addEventListener('scroll', infinitelyMore);
window.addEventListener('resize', infinitelyMore);
});
|
45e98b6467c6f8972844b6f1f3f5a4c426c15c27
|
[
"Markdown",
"JavaScript"
] | 2 |
Markdown
|
akameco/gm-qiita-infinite-more
|
c697b437acc686689b79515534da5b7d479d283b
|
8f918a8d2206aab276a2dbebaaafaf55af9878ad
|
refs/heads/main
|
<repo_name>cygsec/pyAutoGui<file_sep>/openTerminal.py
import pyautogui
import webbrowser
webbrowser.open('http://www.google.com')
# opens search
with pyautogui.hold('command'):
pyautogui.press(['space'])
# searches for term
pyautogui.press(['t','e','r','m'])
# hits enter to open terminal
pyautogui.press(['enter'])
# the rest of this script will likely need sleep commands on it to function
# starts firefox profile script
# pyautogui.press(['.','/','f','i','r','e','f'])
# hits tab to autocomplete
# pyautogui.press(['tab'])
# hits enter to firefox profiles
# pyautogui.press(['enter'])<file_sep>/locateOnScreen-LONG.py
# this script will practice locating calc7key.png stored in the dir we run the script then clicking it
import pyautogui
button7location = pyautogui.locateOnScreen('calc7key.png')
print(button7location)       # e.g. Box(left=1416, top=562, width=50, height=41)
print(button7location[0])    # e.g. 1416
print(button7location.left)  # e.g. 1416
button7point = pyautogui.center(button7location)
print(button7point)          # e.g. Point(x=1441, y=582)
print(button7point[0])       # e.g. 1441
print(button7point.x)        # e.g. 1441
button7x, button7y = button7point
pyautogui.click(button7x, button7y) # clicks the center of where the 7 button was found
pyautogui.click('calc7key.png') # a shortcut version to click on the center of where the 7 button was found<file_sep>/moveMouseto.py
import pyautogui, sys
# use the display-mouse constant to see the mouse position and then use those numbers
pyautogui.moveTo(1585, 1000) # moves mouse to X of 1585, Y of 1000.
# = = = = = = = = = =
# CLICKING
# pyautogui.click() # click the mouse
# pyautogui.click(button='right') # right-click the mouse
# pyautogui.doubleClick() # perform a left-button double click
# ADDITIONAL FUNCTIONS
# pyautogui.moveTo(100, 200, 2) # moves mouse to X of 100, Y of 200 over 2 seconds
# pyautogui.moveTo(100, 200) # moves mouse to X of 100, Y of 200.
# pyautogui.move(0, 50) # move the mouse down 50 pixels.
# pyautogui.move(-30, 0) # move the mouse left 30 pixels.
# pyautogui.move(-30, None) # move the mouse left 30 pixels.
# = = = = = = = = = =
# MOUSE DRAGGING
# pyautogui.dragTo(100, 200, button='left') # drag mouse to X of 100, Y of 200 while holding down left mouse button
# pyautogui.dragTo(300, 400, 2, button='left') # drag mouse to X of 300, Y of 400 over 2 seconds while holding down left mouse button
# pyautogui.drag(30, 0, 2, button='right') # drag the mouse left 30 pixels over 2 seconds while holding down the right mouse button<file_sep>/README.md
# python
various python scripts
<file_sep>/speedTest.py
import pyautogui
import webbrowser
webbrowser.open('http://www.google.com')
# searches for term
pyautogui.sleep(3)
pyautogui.press(['s','p','e','e','d','space','t','e','s','t'])
pyautogui.press(['enter'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
pyautogui.sleep(.5)
pyautogui.press(['tab'])
# this last enter should initiate the google test
pyautogui.press(['enter'])
# this should allow the test to run you may need to adjust the timer to be longer
pyautogui.sleep(30)
# the following will take the screenshot and save the file
im1 = pyautogui.screenshot()
im2 = pyautogui.screenshot('google_speedtest.png')
# this will quit the browser
with pyautogui.hold('command'):
pyautogui.press(['q'])<file_sep>/locateOnScreen.py
import pyautogui
import time
time.sleep(2)
pyautogui.click('calc7key.png')<file_sep>/automate-outlook.py
import pyautogui, sys
# display mouse position in real time
pyautogui.displayMousePosition()
<file_sep>/moveToStart.py
import pyautogui
start = pyautogui.locateCenterOnScreen('start.png') #If the file is not a png file it will not work
print(start)
pyautogui.moveTo(start) #Moves the mouse to the coordinates of the image
<file_sep>/takeScreenshot.py
import pyautogui
im1 = pyautogui.screenshot()
im2 = pyautogui.screenshot('my_screenshot.png')<file_sep>/whileLoop.py
# set variable condition to 1
condition = 1
while condition < 10:
print (condition)
# this will take the condition variable and add 1 to it, this should print out every time
condition += 1
# this will constantly be set to true until keyboard interrupt
while True:
print('doing stuff')<file_sep>/openBrowser.py
# this script will open a browser and type the word 'test' (note that the sleep timer may need to be adjusted depending on machine speed)
import pyautogui
import webbrowser
webbrowser.open('http://www.google.com')
# searches for term
pyautogui.sleep(2)
pyautogui.press(['t','e','s','t'])
pyautogui.press(['enter'])
|
9607c20277c1fef7f18a08f7b5d401369c4b5420
|
[
"Markdown",
"Python"
] | 11 |
Python
|
cygsec/pyAutoGui
|
8bb566986be9bd87c3c3ebed75d372f451375a07
|
e79d8af7acd6c213232e4efa87d55d33481b714b
|
refs/heads/main
|
<file_sep>// Functions --------------------------------------------------------------------
function numberRandomizer(min, max) {
return Math.floor(Math.random() * (max + 1 - min) + min);
}
// -----------------------------------------------------------------------------
var quantityOfRandomNumbers = 5;
var minRandomValue = 1;
var maxRandomValue = 99;
var milliseconds = 30000;
var numberList = [];
for (var i = 1; i <= quantityOfRandomNumbers; i++) {
numberList.push(numberRandomizer(minRandomValue, maxRandomValue))
}
console.log(numberList);
var seconds = milliseconds / 1000;
alert('Osserva bene questi numeri: ' + numberList + '. Dopo che avrai chiuso questo alert passeranno ' + seconds + ' secondi e poi ti chiederรฒ i numeri che hai visto uno alla volta. Vediamo quanti ne azzecchi?')
var rightNumbers = 0;
setTimeout(function(){
for (var i = 1; i <= quantityOfRandomNumbers; i++) {
var userNumberInput = parseInt(prompt('Numero ' + i));
if (numberList.includes(userNumberInput)) {
rightNumbers++;
}
}
alert('Hai indovinato ' + rightNumbers + ' numeri.')
}, milliseconds);
|
fe2041d33c5e087dab161543eaa058e23fba1ebc
|
[
"JavaScript"
] | 1 |
JavaScript
|
gabr-moragarm-f/js-simon
|
1c2495d82daca295507d5b09aed3c467d760ea74
|
a63c2080ce2c1830eb5b1e397a95666e7c295401
|
refs/heads/master
|
<repo_name>Siorski/ruby<file_sep>/spec/views/movies/show.html.erb_spec.rb
require 'spec_helper'
describe "movies/show" do
before(:each) do
@movie = assign(:movie, stub_model(Movie,
:tytul => "MyText",
:rezyser => "Rezyser",
:gatunek => "Gatunek",
:dlugosc => 1,
:ocena_filmweb => "9.99",
:ocena_imdb => "9.99",
:moja_ocena => "9.99"
))
end
it "renders attributes in <p>" do
render
# Run the generator again with the --webrat flag if you want to use webrat matchers
rendered.should match(/MyText/)
rendered.should match(/Rezyser/)
rendered.should match(/Gatunek/)
rendered.should match(/1/)
rendered.should match(/9.99/)
rendered.should match(/9.99/)
rendered.should match(/9.99/)
end
end
<file_sep>/spec/views/movies/new.html.erb_spec.rb
require 'spec_helper'
describe "movies/new" do
before(:each) do
assign(:movie, stub_model(Movie,
:tytul => "MyText",
:rezyser => "MyString",
:gatunek => "MyString",
:dlugosc => 1,
:ocena_filmweb => "9.99",
:ocena_imdb => "9.99",
:moja_ocena => "9.99"
).as_new_record)
end
it "renders new movie form" do
render
# Run the generator again with the --webrat flag if you want to use webrat matchers
assert_select "form", :action => movies_path, :method => "post" do
assert_select "textarea#movie_tytul", :name => "movie[tytul]"
assert_select "input#movie_rezyser", :name => "movie[rezyser]"
assert_select "input#movie_gatunek", :name => "movie[gatunek]"
assert_select "input#movie_dlugosc", :name => "movie[dlugosc]"
assert_select "input#movie_ocena_filmweb", :name => "movie[ocena_filmweb]"
assert_select "input#movie_ocena_imdb", :name => "movie[ocena_imdb]"
assert_select "input#movie_moja_ocena", :name => "movie[moja_ocena]"
end
end
end
<file_sep>/README.rdoc
A project for an application written in Ruby.
Login via Twitter.
Search by title.
Heroku: http://rubymovies.herokuapp.com/
<file_sep>/spec/views/movies/index.html.erb_spec.rb
require 'spec_helper'
describe "movies/index" do
before(:each) do
assign(:movies, [
stub_model(Movie,
:tytul => "MyText",
:rezyser => "Rezyser",
:gatunek => "Gatunek",
:dlugosc => 1,
:ocena_filmweb => "9.99",
:ocena_imdb => "9.99",
:moja_ocena => "9.99"
),
stub_model(Movie,
:tytul => "MyText",
:rezyser => "Rezyser",
:gatunek => "Gatunek",
:dlugosc => 1,
:ocena_filmweb => "9.99",
:ocena_imdb => "9.99",
:moja_ocena => "9.99"
)
])
end
it "renders a list of movies" do
render
# Run the generator again with the --webrat flag if you want to use webrat matchers
assert_select "tr>td", :text => "MyText".to_s, :count => 2
assert_select "tr>td", :text => "Rezyser".to_s, :count => 2
assert_select "tr>td", :text => "Gatunek".to_s, :count => 2
assert_select "tr>td", :text => 1.to_s, :count => 2
assert_select "tr>td", :text => "9.99".to_s, :count => 2
assert_select "tr>td", :text => "9.99".to_s, :count => 2
assert_select "tr>td", :text => "9.99".to_s, :count => 2
end
end
<file_sep>/app/models/movie.rb
class Movie < ActiveRecord::Base
attr_accessible :dlugosc, :gatunek, :moja_ocena, :ocena_filmweb, :ocena_imdb, :rezyser, :tytul
validates :tytul, presence: true
validates :dlugosc, :numericality => { :greater_than_or_equal_to => 0 }
validates :ocena_filmweb, :numericality => { only_float: true, :greater_than_or_equal_to => 0 }
validates :ocena_imdb, :numericality => { only_float: true, :greater_than_or_equal_to => 0 }
def self.search(tytul)
if tytul.present?
find(:all, :conditions => ['Tytul like ?', "%#{tytul}%"])
else
scoped
end
end
end
<file_sep>/Gemfile
source 'https://rubygems.org'
ruby '1.9.3'
gem 'rails', '~> 3.2.9'
gem 'json', '~> 1.7.5'
gem 'simple_form', '~> 2.0.4'
gem 'sqlite3', '~> 1.3.6', :groups => [:test, :development]
gem 'pg', '~> 0.14.1', :groups => :production
gem 'capybara', '~> 1.1.2', :groups => :test
gem 'heroku'
gem 'twitter-bootstrap-rails'
gem 'therubyracer'
gem 'libv8', '~> 3.11.8'
group :assets do
gem 'less-rails'
gem 'coffee-rails', '~> 3.2.1'
gem 'uglifier', '>= 1.0.3'
gem 'jquery-ui-rails', '~> 2.0.2'
gem 'jquery-datatables-rails', '~> 1.11.1'
end
gem 'jquery-rails', '~> 2.1.3'
group :development, :test do
gem 'hirb', '~> 0.7.0'
gem 'quiet_assets', '~> 1.0.1' # wylacza logowanie *assets pipeline*
gem 'rspec-rails', '~> 2.11.0'
end
gem 'omniauth-twitter'
gem 'thin'
<file_sep>/app/controllers/application_controller.rb
class ApplicationController < ActionController::Base
protect_from_forgery
helper_method :current_user # make it visible in views
private
def current_user
@current_user ||= User.find(session[:user_id]) if session[:user_id]
end
end
<file_sep>/config/initializers/omniauth.rb
Rails.application.config.middleware.use OmniAuth::Builder do
provider :twitter, "Z1UH5MoIyS4TySxox7LmRw", "<KEY>"
end
|
782934643f8959497e9785639ac8376b06bcc321
|
[
"RDoc",
"Ruby"
] | 8 |
Ruby
|
Siorski/ruby
|
5990dd03147f63650292a6646cabfd74198e4bcd
|
4637ea264a6e1dae8e5d6dfde5b7b5b52f3f2709
|
refs/heads/master
|
<repo_name>cahlchang/ssl_certificate_exporter<file_sep>/README.md
SSL Certificate Exporter
========================
Exporter for SSL Certificate metrics https://prometheus.io/
## Configuration
1. Write the domains whose SSL certificate expiration dates you want to check to a JSON file.
```json
{"domains":["example.com","example.net","example.org"],"isLocal":false}
```
2. Host the JSON file somewhere on the web, for example as a Gist or on S3.
3. Specify it in the environment variable CONFIG_URL.
ex. https://gist.githubusercontent.com/s-aska/03c41cf0d3f8b369cf0ae80d02a26c02/raw/3c742b80c4c1c7e79fb6705cda19808efb8048eb/config.json
## Run
### Local machine
```sh
git clone <EMAIL>:s-aska/ssl_certificate_exporter.git
cd s-aska/ssl_certificate_exporter
go build
env \
PORT=9100 \
CONFIG_URL="https://.../config.json" \
./ssl_certificate_exporter
```
### Heroku
[](https://heroku.com/deploy)
### Docker
```
docker pull aska/ssl_certificate_exporter
docker run -e PORT=9100 \
-e CONFIG_URL="https://.../config.json" \
-p 9100:9100 \
--name ssl_certificate_exporter \
--rm \
aska/ssl_certificate_exporter
```
## Reloading configuration
```sh
curl -X POST http://XXX.XXX.XXX.XXX:9100/-/reload
```
## Grafana (example)
```
Query: ssl_certificate_expires
Legend format: {{domain}}
Axes Left Y Unit: seconds(s)
```

<file_sep>/main.go
package main
import (
"crypto/tls"
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
"log"
"math"
"net/http"
"os"
"strings"
"sync"
"time"
"fmt"
)
const namespace = "ssl_certificate"
type exporter struct {
expires *prometheus.GaugeVec
}
type config struct {
Domains []string `json:"domains"`
IsLocal bool `json:"isLocal"`
}
var configUrl string
var domains []string
var isLocal bool
var m = new(sync.Mutex)
func newExporter() *exporter {
return &exporter{
expires: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "expires",
Help: "expires",
},
[]string{
"domain",
},
),
}
}
func (e *exporter) Describe(ch chan<- *prometheus.Desc) {
e.expires.Describe(ch)
}
func (e *exporter) Collect(ch chan<- prometheus.Metric) {
m.Lock()
defer m.Unlock()
for _, domain := range domains {
s := check(domain)
if !math.IsNaN(s) {
e.expires.WithLabelValues(domain).Set(s)
}
}
e.expires.Collect(ch)
}
func check(domain string) float64 {
config := tls.Config{}
var addr = ""
if isLocal {
config.ServerName = domain
addr = "127.0.0.1:443"
} else {
addr = domain + ":443"
}
conn, err := tls.Dial("tcp", addr, &config)
if err != nil {
log.Fatal("domain:" + domain + " error:" + err.Error())
return math.NaN()
}
state := conn.ConnectionState()
certs := state.PeerCertificates
defer conn.Close()
duration := certs[0].NotAfter.Unix() - time.Now().Unix()
return float64(duration)
}
func load() {
if configUrl == "" {
return
}
resp, err := http.Get(configUrl)
if err != nil {
log.Print(err)
return
}
defer resp.Body.Close()
if resp.StatusCode > 201 {
log.Printf("Failure loading url:%v code:%v error:%v", configUrl, resp.StatusCode, resp.Status)
return
}
var config config
dec := json.NewDecoder(resp.Body)
dec.Decode(&config)
m.Lock()
defer m.Unlock()
domains = config.Domains
isLocal = config.IsLocal
log.Printf("Successful loading domains:%v", strings.Join(domains, ","))
}
func main() {
exporter := newExporter()
prometheus.MustRegister(exporter)
http.Handle("/metrics", prometheus.Handler())
http.HandleFunc("/-/reload", reload)
port := ":" + os.Getenv("PORT")
// Sample
// https://gist.githubusercontent.com/s-aska/03c41cf0d3f8b369cf0ae80d02a26c02/raw/3c742b80c4c1c7e79fb6705cda19808efb8048eb/config.json
configUrl = os.Getenv("CONFIG_URL")
if configUrl == "" {
log.Fatal("Missing ENV CONFIG_URL")
}
load()
if len(domains) == 0 {
log.Fatal("Missing domains for config")
}
log.Print("Listening 127.0.0.1", port)
log.Fatal(http.ListenAndServe(port, nil))
}
func reload(w http.ResponseWriter, r *http.Request) {
load()
fmt.Fprintf(w, "Reloading configuration file... domains:%v", strings.Join(domains, ","))
}
<file_sep>/Dockerfile
FROM golang:onbuild
EXPOSE 9100
|
bfb9b357356ab87eb37007900e7138cfa043a579
|
[
"Markdown",
"Go",
"Dockerfile"
] | 3 |
Markdown
|
cahlchang/ssl_certificate_exporter
|
d31e99f223df9a3e7df13e375bd661df2576b4be
|
18cb37a6576be8370750f5a6fe1133d62f6c0d33
|
refs/heads/master
|
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use yii\validators\Validator;
use Yii;
use yii\db\IntegrityException;
use yii\base\InvalidConfigException;
/**
* Validates a value againts the identifier of another model.
*
* @author <NAME> <<EMAIL>>
*/
class IntegrityValidator extends Validator {
public $className;
public $isArray = false;
public $field = null;
/**
* @inheritdoc
*/
public function init() {
parent::init();
if (empty($this->className)) {
throw new InvalidConfigException('Property "className" must be provided');
}
}
/**
* @inheritdoc
*/
protected function validateValue($value) {
if ($this->isArray === true) {
foreach ($value as $v) {
$r = $this->_validateValue($v);
if ($r !== null) {
return $r;
}
}
return null;
} else {
return $this->_validateValue($value);
}
}
private function _validateValue($value) {
$class = $this->className;
if ($this->field === null) {
$obj = $class::findOne($value);
} else {
$obj = $class::findOne([$this->field => $value]);
}
if ($obj === null) {
throw new IntegrityException("Integrity constant violation. {$class} with primary key {$value} doesn't exist");
} else {
return null;
}
}
/**
* @inheritdoc
*/
public function clientValidateAttribute($model, $attribute, $view) {
$class = $this->className;
$options = [
'message' => Yii::$app->getI18n()->format($this->message, [
'class' => $class::className(),
'value' => is_array($model->$attribute) ? json_encode($model->$attribute) : $model->$attribute
], Yii::$app->language),
];
return '';
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use yii\validators\RegularExpressionValidator;
use Yii;
/**
* NameValidator validates that the attribute value matches to the specified
* pattern regular expression.
*
* @author <NAME> <<EMAIL>>
*/
class NameValidator extends RegularExpressionValidator {
/**
* @inheritdoc
*/
public function init() {
$this->pattern = '/^[a-zA-Zร-รร-รถรธ-รฟ]+(([.\'ยชยบ]{1}[\s]?|[\s\-]{1})[a-zA-Zร-รร-รถรธ-รฟ]+)*[.ยชยบ]?$/u';
if ($this->message === null) {
$this->message = Yii::t('validator', "A name can consist of Latin alphabetic characters. It can contain points, apostrophes ['] and ordinals [ยบยช] as terminators of words, and blank spaces [ ] or dashes [-] as separator characters. A name can not contain more than one successive separator character.");
}
parent::init();
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use yii\validators\RegularExpressionValidator;
use Yii;
/**
* AddressValidator validates that the attribute value matches to the specified.
*
* @author <NAME> <<EMAIL>>
*/
class AddressValidator extends RegularExpressionValidator {
/**
* @inheritdoc
*/
public function init() {
$this->pattern = '/^[a-zA-Zร-รร-รถรธ-รฟ0-9]+(([,.\'\/ยบยช]{1}[\s]?|[ยบยช]{1}[\-]?|[\s\-]{1})[a-zA-Zร-รร-รถรธ-รฟ0-9]+)*\.?$/u';
if ($this->message === null) {
$this->message = Yii::t('validator', "An address can consist of Latin alphabetic characters. It can contain punctuation marks like points [.], commas [,], slashes [/] and apostrophes ['] followed by a blank space, ordinal [ยบยช] as terminator of word and blank spaces [ ] or dashes [-] as word separator characters. An address can't contain more than one successive separator character.");
}
parent::init();
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use Yii;
use yii\helpers\Json;
use yii\validators\RegularExpressionValidator;
/**
* CifValidator to validate spanish CIF's.
*
* @author <NAME> <<EMAIL>>
*/
class CifValidator extends RegularExpressionValidator
{
/**
* Organization - Leading letter table
*
* @var string
*/
protected static $organization = [
'int' => [
'A', 'B', 'E', 'H',
]
, 'char' => [
'N', 'P', 'Q', 'R', 'S', 'W'
]
, 'other' => [
'C', 'D', 'F', 'G', 'J', 'K', 'L', 'M', 'U', 'V'
]
];
/**
* Control digit table.
*
* @var string
*/
protected static $table = [
'0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'
];
/**
* If true the value must be written with the control digit.
*
* @var boolean
*/
public $withDC = true;
/**
* If withLetter is false and setLetter is true, the CIF will be returned
* with the corresponding control digit.
*
* @var boolean
*/
public $setDC = false;
/**
*
* @var boolean
*/
public $caseInsensitive = true;
/**
*
* @var type
*/
public $messages = [];
/**
*
* @var string
*/
protected $_value;
/**
* @inheritdoc
*/
public function init()
{
$this->ensureValidators();
$this->ensureMessages();
parent::init();
}
/**
* @inheritdoc
*/
public function validateAttribute($model, $attribute)
{
$result = $this->validateValue($model->$attribute);
if (!empty($result)) {
$this->addError($model, $attribute, $result[0], $result[1]);
}
$model->$attribute = $this->_value;
}
/**
* @inheritdoc
*/
public function validateValue($value)
{
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternError'], []];
}
$organization = substr($value, 0, 1);
$number = substr($value, 1, 7);
$sum = 0;
//The specification talks about odds and evens but one based. So here the odds and evens vars are exchanged.
for ($i = 0, $l = strlen($number); $i < $l; ++$i) {
$n = $number[$i];
if ($i % 2 === 0) {
$n = array_sum(str_split($n * 2));
}
$sum += $n;
}
$dcNumber = 10 - ($sum % 10);
if (array_search($organization, static::$organization['char']) !== false) {
$dc = $dcAlt = static::$table[$dcNumber];
} elseif (array_search($organization, static::$organization['other']) !== false) {
$dc = static::$table[$dcNumber];
$dcAlt = $dcNumber === 10 ? '0' : (string) $dcNumber;
} else {
$dc = $dcAlt = $dcNumber === 10 ? '0' : (string) $dcNumber;
}
if ($this->withDC === true && in_array(substr($value, -1), [$dc, $dcAlt]) === false) {
return [$this->messages['controlDigitError'], []];
} elseif ($this->setDC === true) {
$value .= is_numeric($dcAlt) === false ? $dcAlt : $dc;
}
$this->_value = $value;
return;
}
/**
* Gets the value after validation.
*
* @return string
*/
public function getNewValue()
{
return $this->_value;
}
/**
* @inheritdoc
*/
public function clientValidateAttribute($model, $attribute, $view)
{
$organization = Json::encode(static::$organization);
$table = Json::encode(static::$table);
$errorMessage = Json::encode($this->messages['controlDigitError']);
$js = parent::clientValidateAttribute($model, $attribute, $view);
if ($this->withDC === true) {
$js .= <<<JS
(function() {
if (value.length) {
var org, number, sum = 0, i, j, dcNumber, dc, dcAlt, tableOrg, tableDC, n, aux;
tableOrg = $organization;
tableDC = $table;
org = value.substr(0, 1);
number = value.substr(1, 7);
for (i = 0; i < number.length; i += 1) {
n = parseInt(number[i]);
if (i % 2 === 0) {
n = 0;
aux = (number[i] * 2).toString();
for (j = 0; j < aux.length; j += 1) {
n += parseInt(aux[j]);
}
}
sum += n;
}
dcNumber = 10 - (sum % 10);
if (tableOrg.char.indexOf(org) !== -1) {
dc = dcAlt = tableDC[dcNumber];
} else if (tableOrg.other.indexOf(org) !== -1) {
dc = tableDC[dcNumber];
dcAlt = dcNumber === 10 ? '0' : dcNumber.toString();
} else {
dc = dcAlt = dcNumber === 10 ? '0' : dcNumber.toString();
}
if (dc !== value.substr(-1) && dcAlt !== value.substr(-1)) {
yii.validation.addMessage(messages, $errorMessage, value);
}
}
})();
JS;
}
return $js;
}
/**
* Ensures the error messages of the validator.
*/
protected function ensureMessages()
{
$patternError = 'The valid format for CIF is a letter followed by 7 digits';
if ($this->withDC === true) {
$patternError .= ' and an ending control digit';
}
$this->messages = array_merge([
'controlDigitError' => Yii::t('yii', 'The control digit doesn\'t correspond to the number')
, 'patternError' => Yii::t('yii', $patternError)
], $this->messages);
$this->message = $this->messages['patternError'];
}
/**
* Ensures the format of the validator.
*/
protected function ensureValidators()
{
$intTable = implode('', static::$organization['int']);
$charTable = implode('', static::$organization['char']);
$otherTable = implode('', static::$organization['other']);
$int = "[$intTable]{1}[0-9]{7}";
$char = "[$charTable]{1}[0-9]{7}";
$other = "[$otherTable]{1}[0-9]{7}";
if ($this->withDC) {
$int .= '[0-9]{1}';
$char .= '[0A-J]{1}';
$other .= '[0-9A-J]{1}';
}
$this->pattern = "/^$int$|^$char$|^$other$/" . ($this->caseInsensitive === true ? 'i' : '');
}
/**
* Gets the value splited in text and number parts.
*
* @param string $value
* @return array
*/
protected function extractParts($value)
{
$split = [
substr($value, 0, 1)
, substr($value, 1, 7)
];
if ($this->withDC) {
$split[] = substr(7, 1);
}
return $split;
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use yii\validators\FilterValidator;
use yii\helpers\HtmlPurifier;
/**
* Filters the content coming from the redactor widget.
*
* @author <NAME> <<EMAIL>>
*/
class RichTextFilterValidator extends FilterValidator {
public $allowedTags = '<code><span><div><label><a><br><p><b><i><del><strike><u><img><video><audio><iframe><object><embed><param><blockquote><mark><cite><small><ul><ol><li><hr><dl><dt><dd><sup><sub><big><pre><code><figure><figcaption><strong><em><table><tr><td><th><tbody><thead><tfoot><h1><h2><h3><h4><h5><h6>';
/**
* @inheritdoc
*/
public function init() {
$this->filter = function($value) {
$value = strip_tags($value, $this->allowedTags);
return RichTextPurifier::process($value);
};
parent::init();
}
}
/**
* Configuration for the HtmlPurifier of the RedactorFilterValidator.
*
* @author <NAME> <<EMAIL>>
*/
class RichTextPurifier extends HtmlPurifier {
/**
* @inheritdoc
*/
public static function configure($config) {
$config->set('HTML.AllowedAttributes', ['img.src', '*.style', 'a.href']);
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use Yii;
use yii\helpers\Json;
use yii\validators\RegularExpressionValidator;
/**
* NifValidator to validate spanish NIF's and NIE's documents.
*
* @author <NAME> <<EMAIL>>
*/
class NifValidator extends RegularExpressionValidator
{
/**
* Control digit table.
*
* @var string
*/
protected static $table = [
'T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z', 'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E'
];
/**
* Allowed leading letters in NIE document.
*
* @var string
*/
protected static $nieLeadingLetters = [
'X', 'Y', 'Z'
];
/**
* Validates the NIF with letter.
*
* @var boolean
*/
public $withDC = true;
/**
* If withDC is false and setDC is true, the NIF will be returned
* with the corresponding letter.
*
* @var boolean
*/
public $setDC = false;
/**
* Validates also NIE.
*
* @var boolean
*/
public $allowNie = false;
/**
*
* @var boolean
*/
public $caseInsensitive = true;
/**
*
* @var type
*/
public $messages = [];
/**
*
* @var string
*/
protected $_value;
/**
* @inheritdoc
*/
public function init()
{
$this->ensureValidators();
$this->ensureMessages();
parent::init();
}
/**
* @inheritdoc
*/
public function validateAttribute($model, $attribute)
{
$result = $this->validateValue($model->$attribute);
if (!empty($result)) {
$this->addError($model, $attribute, $result[0], $result[1]);
}
$model->$attribute = $this->_value;
}
/**
* @inheritdoc
*/
public function validateValue($value)
{
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternError'], []];
}
$split = [];
preg_match_all('/[0-9]+|[A-Z]+/' . ($this->caseInsensitive === true ? 'i' : ''), $value, $split);
$split = $split[0];
$nSplit = count($split);
$numberPosition = $nSplit - ($this->withDC === true ? 2 : 1);
$number = preg_replace('/^[0]+/', '', ($nSplit > 2 ? array_search($split[0], static::$nieLeadingLetters) : '') . $split[$numberPosition]);
$letter = static::$table[$number % 23];
if ($this->withDC === true && $letter !== $split[$numberPosition + 1]) {
return [$this->messages['controlDigitError'], []];
} elseif ($this->setDC === true) {
$split[$numberPosition + 1] = $letter;
}
$this->_value = implode('', $split);
return;
}
/**
* Gets the value after validation.
*
* @return string
*/
public function getNewValue()
{
return $this->_value;
}
/**
* @inheritdoc
*/
public function clientValidateAttribute($model, $attribute, $view)
{
$table = Json::encode(static::$table);
$nieDigits = Json::encode(static::$nieLeadingLetters);
$errorMessage = Json::encode($this->messages['controlDigitError']);
$typeAttribute = $model->formName() . "[last_name]";
$js = parent::clientValidateAttribute($model, $attribute, $view);
if ($this->withDC === true) {
$js .= <<<JS
(function() {
if (value.length) {
var split, nSplit, number, cLetter;
split = value.match(/(\d+|[^\d]+)/g);
nSplit = split.length;
number = ((nSplit > 2 ? $.inArray(split[0], {$nieDigits}) : '') + '' + split[nSplit - 2]).replace(/^0+/, '');
cLetter = {$table}[number % 23];
if (cLetter !== split[split.length - 1]) {
yii.validation.addMessage(messages, $errorMessage, value);
}
}
})();
JS;
}
return $js;
}
/**
* Ensures the error messages of the validator.
*/
protected function ensureMessages()
{
$this->messages = array_merge([
'controlDigitError' => Yii::t('yii', 'The letter doesn\'t correspond to the number.')
, 'patternError' => Yii::t('yii', 'The valid format for NIF is 8 digits followed by a valid letter and for NIE a letter followed by 7 digits and an ending letter.')
], $this->messages);
$this->message = $this->messages['patternError'];
}
/**
* Ensures the format of the validator.
*/
protected function ensureValidators()
{
$std = '^[0-9]{8}';
$nie = '^[XYZ]{1}[0-9]{7}';
if ($this->withDC) {
$letters = implode('', static::$table);
$std .= '[' . $letters . ']{1}$';
$nie .= '[' . $letters . ']{1}$';
} else {
$std .= '$';
$nie .= '$';
}
if ($this->allowNie === true) {
$std .= '|' . $nie;
}
$this->pattern = '/' . $std . '/' . ($this->caseInsensitive === true ? 'i' : '');
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use yii\validators\RegularExpressionValidator;
/**
* ColorValidator validates that the attribute value matches a valid hexadecimal color.
* You may invert the validation logic with help of the {@link not} property (available since 1.1.5).
*
* @author <NAME> <<EMAIL>>
*/
class ColorValidator extends RegularExpressionValidator {
/**
* Pattern for color validation
*
* @var string
*/
public $pattern = '/^#[a-f0-9]{6}$/';
/**
* Returns a random color.
*
* @return string
*/
public static function randColor() {
$c = '#';
$i = 0;
while ($i++ < 3) {
$c .= str_pad(dechex(mt_rand(0, 255)), 2, '0', STR_PAD_LEFT);
}
return $c;
}
}
<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use Yii;
use yii\validators\Validator;
use Traversable;
/**
* CollectionValidator validates arrays and Traversable objects of the same type.
*
* @author <NAME> <<EMAIL>>
*/
class CollectionValidator extends Validator {
public $message;
public $validator;
protected $_validator;
/**
* @inheritdoc
*/
public function init() {
parent::init();
if ($this->message === null) {
$this->message = Yii::t('validator', '{attribute} must be an array');
}
if ($this->validator !== null) {
$this->_validator = Validator::createValidator($this->validator[1], null, null, array_slice($this->validator, 2));
}
}
/**
* @inheritdoc
*/
public function validateAttribute($model, $attribute) {
$result = $this->validateValue($model->$attribute);
if ($result !== null) {
$model->addErrors($result);
}
}
/**
* @inheritdoc
*/
public function validateValue(&$value) {
$error = [];
if (!is_array($value) && !$value instanceof Traversable) {
if (empty($value)) {
$value = [];
} else {
$error[] = $this->message;
}
} elseif ($this->_validator !== null) {
foreach ($value as $el) {
$er = $this->_validator->validateValue($el);
if ($er !== null) {
$error[] = $er;
}
}
}
return empty($error) ? null : $error;
}
public function _array($attribute, $params) {
$r = true;
if (!is_array($this->$attribute)) {
if (empty($this->$attribute)) {
$this->$attribute = [];
} else {
$this->addError(
$this, $attribute, isset($params['message']) ? $params['message'] : Yii::t(
'validator', '{attribute} must be an array', [
'attribute' => $attribute
])
);
$r = false;
}
} elseif (isset($params['class'])) {
foreach ($this->$attribute as $el) {
if (is_a($el, $params['class'])) {
$this->addError(
$this, $attribute, isset($params['message']) ? $params['message'] : Yii::t(
'validator', 'Elements in {attribute} must be of class {class}.', [
'attribute' => $attribute,
'class' => $params['class']
])
);
$r = false;
}
}
}
return $r;
}
}
<file_sep>Yii2 Validators (Development Phase)
==================================
A compilation of useful validators for the Yii2 framework.
## Included Validators
**RichTextFilterValidator** - Filters rich-text input on validation.
**AddressValidator** - Validates postal addresses.
**ColorValidator** - Validates color inputs.
**IntegrityValidator** - Validates the integrity of the models' foreign keys.
**OccidentalNameValidator** - Validates Western-style person names.
**UsernameValidator** - Validates usernames.
## Installation
Include the package as dependency under the composer.json file.
To install, either run
```bash
$ php composer.phar require jlorente/yii2-validators "*"
```
or add
```json
...
"require": {
// ... other configurations ...
"jlorente/yii2-validators": "*"
}
```
to the ```require``` section of your `composer.json` file.
## Usage
Under construction; a minimal usage sketch follows.
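As a provisional illustration only (the `Customer` model and the `document` and `favorite_color` attributes are hypothetical, not part of this package), the validators are meant to be attached in a model's `rules()` method like any other Yii2 validator:

```php
<?php
// Hypothetical ActiveRecord model wiring up two of the bundled validators.
// The class and attribute names are placeholders, not part of the package.
use jlorente\validators\NifValidator;
use jlorente\validators\ColorValidator;

class Customer extends \yii\db\ActiveRecord
{
    public function rules()
    {
        return [
            // Spanish NIF with control letter; NIE documents are accepted as well
            [['document'], NifValidator::className(), 'allowNie' => true],
            // Hexadecimal color value such as #ff0000
            [['favorite_color'], ColorValidator::className()],
        ];
    }
}
```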
## License
Copyright © 2015 <NAME> <<EMAIL>>.<file_sep><?php
/**
* @author <NAME> <<EMAIL>>
* @license The MIT License (MIT)
* @copyright <NAME>
* @version 1.0
*/
namespace jlorente\validators;
use Yii;
use yii\helpers\Html;
use yii\helpers\Json;
use yii\validators\RegularExpressionValidator;
use yii\web\JsExpression;
/**
* NifValidator to validate spanish NIF's and NIE's documents.
*
* @author <NAME> <<EMAIL>>
*/
class DocumentTypeValidator extends RegularExpressionValidator
{
const TYPE_NIF = 1;
const TYPE_NIE = 2;
const TYPE_CIF = 3;
const TYPE_OTHER = 4;
/**
* Control digit table.
*
* @var string
*/
protected static $organization = [
'int' => [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'U', 'V',
]
, 'char' => [
'N', 'P', 'Q', 'R', 'S', 'W'
]
];
/**
* Control digit table.
*
* @var string
*/
protected static $table = [
'T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z', 'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E'
];
/**
* Control digit table.
*
* @var string
*/
protected static $cifTable = [
'0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'
];
/**
* Allowed leading letters in NIE document.
*
* @var string
*/
protected static $nieLeadingLetters = [
'X', 'Y', 'Z'
];
/**
* Validates the document type.
*
* @var int
*/
public $documentTypeAttribute = 'document_type';
/**
*
* @var int
*/
protected $documentType = self::TYPE_OTHER;
/**
* Validates the docment with letter.
*
* @var boolean
*/
public $withDC = true;
/**
* If withDC is false and setDC is true, the document will be returned
* with the corresponding letter.
*
* @var boolean
*/
public $setDC = false;
/**
*
* @var boolean
*/
public $caseInsensitive = true;
/**
*
* @var type
*/
public $messages = [];
/**
*
* @var string
*/
protected $_value;
/**
* @inheritdoc
*/
public function init()
{
$this->pattern = $this->getOtherPattern();
$this->ensureMessages();
parent::init();
}
/**
* @inheritdoc
*/
public function validateAttribute($model, $attribute)
{
$this->documentType = $model->{$this->documentTypeAttribute};
$result = $this->validateValue($model->$attribute);
if (!empty($result)) {
$this->addError($model, $attribute, $result[0], $result[1]);
}
$model->$attribute = $this->_value;
}
/**
* @inheritdoc
*/
public function validateValue($value)
{
switch ($this->documentType) {
case self::TYPE_NIF:
return $this->validateNif($value);
case self::TYPE_NIE:
return $this->validateNie($value);
case self::TYPE_CIF:
return $this->validateCif($value);
case self::TYPE_OTHER:
default:
return $this->validateOther($value);
}
}
/**
*
* @param string $value
*/
public function validateNif($value)
{
$this->pattern = $this->getNifPattern();
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternErrorNif'], []];
}
$split = [];
preg_match_all('/[0-9]+|[A-Z]+/' . ($this->caseInsensitive === true ? 'i' : ''), $value, $split);
$fragments = $split[0];
$number = (int) preg_replace('/^[0]+/', '', $fragments[0]);
$letter = static::$table[$number % 23];
if ($this->withDC === true && $letter !== $fragments[1]) {
return [$this->messages['controlDigitError'], []];
} elseif ($this->setDC === true) {
$fragments[1] = $letter;
}
$this->_value = implode('', $fragments);
return;
}
/**
*
* @param string $value
*/
public function validateNie($value)
{
$this->pattern = $this->getNiePattern();
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternErrorNie'], []];
}
$split = [];
preg_match_all('/[0-9]+|[A-Z]+/' . ($this->caseInsensitive === true ? 'i' : ''), $value, $split);
$fragments = $split[0];
$number = (int) preg_replace('/^[0]+/', '', array_search($fragments[0], static::$nieLeadingLetters) . $fragments[1]);
$letter = static::$table[$number % 23];
if ($this->withDC === true && $letter !== $fragments[2]) {
return [$this->messages['controlDigitError'], []];
} elseif ($this->setDC === true) {
$fragments[2] = $letter;
}
$this->_value = implode('', $fragments);
return;
}
/**
*
* @param string $value
*/
public function validateCif($value)
{
$this->pattern = $this->getCifPattern();
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternErrorCif'], []];
}
$organization = substr($value, 0, 1);
$number = substr($value, 1, 7);
$sum = 0;
//The specification talks about odds and evens but one based. So here the odds and evens vars are exchanged.
for ($i = 0, $l = strlen($number); $i < $l; ++$i) {
$n = $number[$i];
if ($i % 2 === 0) {
$n = array_sum(str_split($n * 2));
}
$sum += $n;
}
$dcNumber = 10 - ($sum % 10);
$dc = (string) $dcNumber;
if (array_search($organization, static::$organization['char']) !== false) {
$dc = static::$cifTable[$dcNumber];
} elseif ($dcNumber === 10) {
$dc = "0";
}
if ($this->withDC === true && $dc !== substr($value, -1)) {
return [$this->messages['controlDigitError'], []];
} elseif ($this->setDC === true) {
$value .= $dc;
}
$this->_value = $value;
return;
}
/**
*
* @param string $value
*/
public function validateOther($value)
{
$this->pattern = $this->getOtherPattern();
$valid = parent::validateValue($value);
if ($valid !== null) {
return [$this->messages['patternErrorDefault'], []];
}
$this->_value = $value;
return;
}
/**
* Gets the value after validation.
*
* @return string
*/
public function getNewValue()
{
return $this->_value;
}
/**
* @inheritdoc
*/
public function clientValidateAttribute($model, $attribute, $view)
{
$table = Json::encode(static::$table);
$nieDigits = Json::encode(static::$nieLeadingLetters);
$errorMessage = Json::encode($this->messages['controlDigitError']);
$typeAttribute = $this->documentTypeAttribute;
$organization = Json::encode(static::$organization);
$cifTable = Json::encode(static::$cifTable);
$regularExpression = Json::htmlEncode([
self::TYPE_NIF => new JsExpression(Html::escapeJsRegularExpression($this->getNifPattern()))
, self::TYPE_NIE => new JsExpression(Html::escapeJsRegularExpression($this->getNiePattern()))
, self::TYPE_CIF => new JsExpression(Html::escapeJsRegularExpression($this->getCifPattern()))
, self::TYPE_OTHER => new JsExpression(Html::escapeJsRegularExpression($this->getOtherPattern()))
]);
$typeNif = self::TYPE_NIF;
$typeNie = self::TYPE_NIE;
$typeCif = self::TYPE_CIF;
$typeOther = self::TYPE_OTHER;
$patternErrorNif = Json::encode($this->messages['patternErrorNif']);
$patternErrorNie = Json::encode($this->messages['patternErrorNie']);
$patternErrorCif = Json::encode($this->messages['patternErrorCif']);
$patternErrorDefault = Json::encode($this->messages['patternErrorDefault']);
$js = null;
if ($this->withDC === true) {
$js = <<<JS
(function(attribute, value, messages, deferred, \$form) {
var regExps = $regularExpression;
var split, nSplit, number, cLetter;
if (!value.length) {
return;
}
var documentInputName = \$form.find('#' + attribute.id).attr('name');
var typeInputName = documentInputName.substr(0, documentInputName.lastIndexOf('[')) + '[$typeAttribute]';
var type = parseInt(\$form.find('[name="' + typeInputName + '"]').val());
if (!type) {
return;
}
switch (type) {
case $typeNif:
if (!value.match(regExps[$typeNif])) {
return yii.validation.addMessage(messages, $patternErrorNif, value);
}
split = value.match(/(\d+|[^\d]+)/g);
nSplit = split.length;
number = split[nSplit - 2].replace(/^0+/, '');
cLetter = {$table}[number % 23];
if (cLetter !== split[split.length - 1]) {
yii.validation.addMessage(messages, $errorMessage, value);
}
break;
case $typeNie:
if (!value.match(regExps[$typeNie])) {
return yii.validation.addMessage(messages, $patternErrorNie, value);
}
split = value.match(/(\d+|[^\d]+)/g);
nSplit = split.length;
number = ($.inArray(split[0], {$nieDigits}) + '' + split[nSplit - 2]).replace(/^0+/, '');
cLetter = {$table}[number % 23];
if (cLetter !== split[split.length - 1]) {
yii.validation.addMessage(messages, $errorMessage, value);
}
break;
case $typeCif:
if (!value.match(regExps[$typeCif])) {
return yii.validation.addMessage(messages, $patternErrorCif, value);
}
var org, number, sum = 0, i, dcNumber, dc, tableOrg, tableDC, n, aux;
tableOrg = $organization;
tableDC = $cifTable;
org = value.substr(0, 1);
number = value.substr(1, 7);
for (i = 0; i < number.length; ++i) {
n = parseInt(number[i]);
if (i % 2 === 0) {
n = 0;
aux = (number[i] * 2).toString();
for (var j in aux) {
n += parseInt(aux[j]);
}
}
sum += n;
}
dcNumber = 10 - (sum % 10);
dc = dcNumber.toString();
if (tableOrg.char.indexOf(org) !== -1) {
dc = tableDC[dcNumber];
} else if (dcNumber === 10) {
dc = "0";
}
if (dc !== value.substr(-1)) {
yii.validation.addMessage(messages, $errorMessage, value);
}
break;
default:
if (!value.match(regExps[$typeOther])) {
yii.validation.addMessage(messages, $patternErrorDefault, value);
}
break;
}
})(attribute, value, messages, deferred, \$form);
JS;
}
return $js;
}
/**
* Ensures the format of the validator.
*/
protected function ensureValidators()
{
switch ($this->documentType) {
case self::TYPE_NIF:
$pattern = $this->getNifPattern();
break;
case self::TYPE_NIE:
$pattern = $this->getNiePattern();
break;
case self::TYPE_CIF:
$pattern = $this->getCifPattern();
break;
case self::TYPE_OTHER:
$pattern = $this->getOtherPattern();
break;
}
$this->pattern = $pattern . ($this->caseInsensitive === true ? 'i' : '');
}
/**
* Ensures the error messages of the validator.
*/
protected function ensureMessages()
{
$this->messages = array_merge([
'controlDigitError' => Yii::t('yii', 'The letter doesn\'t correspond to the number.')
, 'patternErrorNif' => Yii::t('yii', 'The valid format for NIF is 8 digits followed by a valid letter.')
, 'patternErrorNie' => Yii::t('yii', 'The valid format for NIE is a leading letter followed by 7 digits and an ending letter.')
, 'patternErrorCif' => Yii::t('yii', 'The valid format for CIF is a letter followed by 7 digits and an ending letter.')
, 'patternErrorDefault' => Yii::t('yii', 'The valid format for document is a string formed by letters and numbers.')
], $this->messages);
$this->message = $this->messages['patternErrorDefault'];
}
/**
*
* @return type
*/
protected function getNifPattern()
{
$std = '^[0-9]{8}';
if ($this->withDC) {
$letters = implode('', static::$table);
$std .= '[' . $letters . ']{1}$';
} else {
$std .= '$';
}
return "/$std/";
}
/**
*
* @return
*/
protected function getNiePattern()
{
$nie = '^[XYZ]{1}[0-9]{7}';
if ($this->withDC) {
$letters = implode('', static::$table);
$nie .= '[' . $letters . ']{1}$';
} else {
$nie .= '$';
}
return "/$nie/";
}
/**
*
* @return
*/
protected function getCifPattern()
{
$intTable = implode('', static::$organization['int']);
$charTable = implode('', static::$organization['char']);
$int = "[$intTable]{1}[0-9]{7}";
$char = "[$charTable]{1}[0-9]{7}";
if ($this->withDC) {
$int .= '[0-9]{1}';
$char .= '[0A-J]{1}';
}
return "/^$int$|^$char$/";
}
/**
*
* @return
*/
protected function getOtherPattern()
{
return "/^[A-Z0-9]{2,30}$/";
}
/**
* Guesses the value document type.
*
* @param string $value
* @return int|null
*/
public function guessType($value)
{
if (!$value) {
return null;
}
if ($this->validateNif($value) === null) {
return self::TYPE_NIF;
} elseif ($this->validateNie($value) === null) {
return self::TYPE_NIE;
} elseif ($this->validateCif($value) === null) {
return self::TYPE_CIF;
} else {
return self::TYPE_OTHER;
}
}
}
|
21c997ec3037d611fcdd8ba5002bae4514d559af
|
[
"Markdown",
"PHP"
] | 10 |
PHP
|
jlorente/yii2-validators
|
99641fc8afaa4e9f9a29df8b6a157a01a8073d91
|
7d7e8cb00366491820525911bd3f4f1f6914788e
|
refs/heads/master
|
<file_sep>const express=require('express');
const bodyParser=require('body-parser');
const mongoose=require('mongoose')
const cors=require('cors');
const config=require('./config/dev')
const app=express();
require('./models/User')
require('./models/Product')
mongoose.connect(config.mongoURI,{useNewUrlParser:true})
app.use(cors({
origin:"*"
}))
app.use(bodyParser.json());
require('./routes/dialogFlowRoutes')(app);
require('./routes/fulfillmentRoutes')(app)
app.listen(process.env.PORT||5000,()=>{
console.log('server is running')
})<file_sep>
const chatbot=require('../chatbot/chatbot')
// const dialogflow=require('dialogflow')
module.exports=app=>{
app.get('/',(req,res)=>{
res.send("herllll")
})
app.post('/api/df_text_query', async(req,res)=>{
let responses=await chatbot.textQuery(req.body.text);
// console.log("TEXT RESPONSE",responses)
res.send(responses)
});
app.post('/api/df_event_query',async(req,res)=>{
let responses=await chatbot.eventQuery(req.body.event)
// let response=responses[0].queryResult.fulfillmentMessages[1].text.text;
// res.send(responses)
// console.log(responses.queryResult.fulfillmentMessages[2])
res.send(responses)
});
}<file_sep>'use strict'
const dialogflow=require('dialogflow')
const config=require('../config/keys')
const structjson=require('structjson')
const mongoose = require('mongoose');
const projectID=config.googleProjectID;
const credentials={
client_email:config.googleClientEmail,
private_key:config.googlePrivateKey
};
const sessionClient=new dialogflow.SessionsClient({projectID,credentials})
const User=mongoose.model('user')
module.exports={
textQuery:async function(text){
const sessionPath=sessionClient.sessionPath(config.googleProjectID,config.dialogFlowSessionID)
let self=module.exports;
const request={
session:sessionPath,
queryInput:{
text:{
text:text,
languageCode:config.dialogFlowSessionLanguageCode
}
}
}
// console.log("CHATBOT request",request)
let responses=await sessionClient.detectIntent(request);
responses=await self.handleAction(responses)
// console.log("CHATBOT RESPONsES",responses)
return responses;
},
eventQuery:async function(event,parameters={}){
const sessionPath=sessionClient.sessionPath(config.googleProjectID,config.dialogFlowSessionID)
let self=module.exports
const request={
session:sessionPath,
queryInput:{
event:{
name:event,
// parameters:structjson.jsonToStructProto(parameters),
languageCode:config.dialogFlowSessionLanguageCode
}
}
}
let responses=await sessionClient.detectIntent(request)
responses=await self.handleAction(responses)
// console.log("EVENT RESPONSES",responses)
return responses
},
handleAction:function(responses){
let self=module.exports
let queryResult=responses[0].queryResult;
// console.log("QUERYRESULT",queryResult)
switch(queryResult.action){
case 'homeDeliveryYes':
// console.log("YAAAAY")
if(queryResult.allRequiredParamsPresent){
self.saveRegistration(queryResult.parameters.fields)
}
break;
}
return responses
},
saveRegistration:async function(fields){
const user=new User({
name:fields.name.stringValue,
mobile:fields.phone.stringValue,
address:fields.address.stringValue,
registerDate:Date.now()
})
try{
const useR=await user.save();
console.log("USER",useR)
}
catch(e){
console.log("ERROR",e)
}
}
}<file_sep>const {WebhookClient} = require('dialogflow-fulfillment');
const { Payload } = require("dialogflow-fulfillment");
const mongoose = require('mongoose');
const Product=mongoose.model('product');
const customPayload={
signal_info:"cards",
cards:[]
}
// Product.find({name:'<NAME>'},(err,products)=>{
// if(products!==null){
// // console.log("products search from db",product)
// products.map(product=>{
// customPayload.cards.push(product)
// })
// console.log(customPayload)
// }
// })
module.exports = app => {
app.post('/', async (req, res) => {
const agent = new WebhookClient({ request: req, response: res });
function fallback(agent) {
agent.add(`Bhai samjh nahi aya`);
agent.add(`I'm sorry, can you try again?`);
}
async function foodProducts(agent){
console.log("AGENT VALUE",agent.parameters.animal)
// default to 'dog' products when no animal parameter is given
let products = await Product.find({ for: agent.parameters.animal ? agent.parameters.animal : 'dog' });
customPayload.cards=products
// console.log("CUSTOM PAYLOAD",customPayload);
if(customPayload.cards.length>0)
{
agent.add('Here are some products available')
agent.add(
new Payload(agent.UNSPECIFIED, customPayload, {rawPayload: true, sendAsMessage: true})
);
}
else{
agent.add('nothing found in the database')
}
}
let intentMap = new Map();
intentMap.set('FOOD',foodProducts)
intentMap.set('Default Fallback Intent', fallback);
agent.handleRequest(intentMap);
});
app.get('/',(req,res)=>{
// // const agent = new WebhookClient({ request: req, response: res });
// // function snoopy(agent) {
// // agent.add(`Welcome to my Snoopy fulfillment!`);
// // }
// // let intentMap = new Map();
// // intentMap.set('snoopy', snoopy);
// // agent.handleRequest(intentMap);
res.send({"test":"center"})
})
}
// const payload = {
// signal_info: "cards",
// cards: [
// {
// header: "<NAME>",
// available: "For:Dogs",
// price_range: "$20-$40",
// img: "https://www.pettz.com/media/catalog/product/cache/1/small_image/210x/9df78eab33525d08d6e5fb8d27136e95/c/0/c01304-6.jpg"
// },
// {
// header: "Dried Chicken",
// available: "For:Cats",
// price_range: "$12-24",
// img: "https://cdn.shopify.com/s/files/1/2133/9385/products/CatKibble-RC-Chicken-1080x1080_0b97bb13-4ad3-4d57-a603-d93f54d84f1c_600x.jpg?v=1571710492"
// },
// {
// header: "<NAME>",
// available: "For:Dogs",
// price_range: "$20-120",
// img: "https://www.pettz.com/media/catalog/product/cache/1/small_image/210x/9df78eab33525d08d6e5fb8d27136e95/c/0/c01361.jpg"
// },
// {
// header: "Chips",
// available: "For:Dogs",
// price_range: "$20-40",
// img: "https://www.pettz.com/media/catalog/product/cache/1/small_image/210x/9df78eab33525d08d6e5fb8d27136e95/c/1/c10090-16.jpg"
// },
// {
// header: "Pressed bones",
// available: "For:Dogs",
// price_range: "$20-30",
// img: "https://www.pettz.com/media/catalog/product/cache/1/small_image/210x/9df78eab33525d08d6e5fb8d27136e95/c/0/c00830-12.jpg"
// },
// {
// header: "Minced Tuna",
// available: "For:Cats",
// price_range: "$24.99",
// img: "https://cdn.shopify.com/s/files/1/2133/9385/products/791411_d7563599-687c-4f0b-bee2-d2c98f9e98d6_600x.jpg?v=1571710390"
// }
// ]
// };<file_sep>import React, { Component } from 'react';
import QuickReply from './QuickReply';
import '../../css/quickReplies.css'
class QuickReplies extends Component {
constructor(props){
super(props);
// console.log("props of quick",props)
}
_handleClick=(event, payload, text)=> {
this.props.replyClick(event, payload, text);
}
renderQuickReply(reply, i) {
return <QuickReply key={i} click={this._handleClick} reply={reply} />;
}
renderQuickReplies(quickReplies) {
if (quickReplies) {
return quickReplies.map((reply, i) => {
return this.renderQuickReply(reply, i);
}
)
} else {
return null;
}
}
render() {
return (
<div className="quick-replies-parent">
{/* <div className="quick-reply-speaks">
<a href="/" className="">{this.props.speaks}</a>
</div> */}
<div id="quick-replies" className="">
{this.renderQuickReplies(this.props.payload)}
{/* {this.props.text && <p>
{this.props.text.stringValue}
</p>
} */}
</div>
</div>
);
}
}
export default QuickReplies;
<file_sep>import React from 'react';
import '../../css/card.css'
const Card = (props) => {
console.log("ONE CARD PROPS",props)
return (
// <div style={{ height: 270, paddingRight:30, float: 'left'}}>
// <div className="card">
// <div className="card-image" style={{ width: 240}}>
// <img alt={props.payload.fields.header.stringValue} src={props.payload.fields.img.stringValue} />
// <span className="card-title">{props.payload.fields.header.stringValue}</span>
// </div>
// <div className="card-content">
// {props.payload.fields.price_range.stringValue}
// <p> <a href="/">{props.payload.fields.available.stringValue}</a></p>
// </div>
// <div className="card-action">
// <a target="_blank" rel="noopener noreferrer" href={props.payload.fields.price_range.stringValue}>GET NOW</a>
// </div>
// </div>
// </div>
<div>
<div className="card">
<div className="card-image">
<img alt={props.payload.fields.name.stringValue} src={props.payload.fields.image.stringValue} />
</div>
<div className="card-header">
<p>{props.payload.fields.name.stringValue}</p>
</div>
<div className="card-description">
<p>{props.payload.fields.for.stringValue}</p>
</div>
<div className="card-button">
<p>{props.payload.fields.price.stringValue}</p>
</div>
</div>
</div>
);
};
export default Card; <file_sep>module.exports=require('./dev')<file_sep>import React from 'react'
import '../../css/chatbot.css'
import '../../css/cards.css'
import Messages from './Message'
import QuickReplies from './QuickReplies'
import Card from './Card'
import Loading from '../../components/chatbot/Loading'
const axios = require('axios');
class Chatbot extends React.Component {
messagesEnd;
talkInput;
//constructor
constructor(props) {
//super
super(props)
//state
this.state = {
messages: [],
showBot: false,
loader: false
}
//bindings
this.showBot = this.showBot.bind(this);
this.hideBot = this.hideBot.bind(this);
this._handleInputKeyPress = this._handleInputKeyPress.bind(this)
this._handleQuickReplyPayload = this._handleQuickReplyPayload.bind(this);
}
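    // Quick-reply buttons either fire a named Dialogflow event (for the known payloads
    // below) or fall back to sending the button's text as an ordinary text query.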
_handleQuickReplyPayload(event, payload, text) {
event.preventDefault();
event.stopPropagation();
console.log("QUICK REPLY PAYLOAD", payload)
console.log("QUICK REPLY TEXT", text)
switch (payload) {
case 'DOIT':
this.df_event_query('DOIT');
break;
case 'homeDelivery':
this.df_event_query('homeDelivery');
break;
case 'ADDRESS':
this.df_event_query('ADDRESS');
break;
case 'FOOD':
this.df_event_query('FOOD');
break;
case 'TOYS':
this.df_event_query('TOYS');
break;
default:
this.df_text_query(text);
break;
}
}
//class functions
showBot(event) {
event.preventDefault();
event.stopPropagation();
this.setState({ showBot: true })
}
hideBot(event) {
event.preventDefault();
event.stopPropagation();
this.setState({ showBot: false })
}
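    // POST the user's text to the backend /api/df_text_query endpoint, echo it in the
    // chat, show the typing loader, then append each fulfillment message the bot returns.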
async df_text_query(queryText) {
let says = {
speaks: 'user',
msg: {
text: {
text: queryText
}
}
}
this.setState({ messages: [...this.state.messages, says] });
this.setState({
loader: true
});
const res = await axios.post('http://localhost:5000/api/df_text_query', { text: queryText });
console.log("Response in df_TEXT_QUERY IN Chatbot component",res)
// console.log("PAYLOAD FOR QUICK REPLIES",res.data[0].queryResult.fulfillmentMessages[1].payload.fields.quick_replies.listValue.values[0].structValue.fields.payload.stringValue)
// console.log("PAYLOAD FOR QUICK REPLIES",res.data[0].queryResult.fulfillmentMessages)
for (let msg of res.data[0].queryResult.fulfillmentMessages) {
says = {
speaks: 'bot',
msg: msg
}
this.setState({ messages: [...this.state.messages, says],loader: false });
}
// console.log(res.data[0].queryResult.fulfillmentMessages[0].text.text)
};
//event handle function
async df_event_query(event) {
this.setState({
loader: true
});
const res = await axios.post('http://localhost:5000/api/df_event_query', { event: event })
// console.log(res)
for (let msg of res.data[0].queryResult.fulfillmentMessages) {
let says = {
speaks: 'bot',
msg: msg
}
this.setState({ messages: [...this.state.messages, says],loader: false })
}
};
renderMessages(stateMessages) {
if (stateMessages) {
return stateMessages.map((message, i) => {
return this.renderOneMessage(message, i);
})
}
}
renderCards(cards) {
return cards.map((card, i) => <Card key={i} payload={card.structValue}/>);
}
renderOneMessage(message, i) {
// console.log("IndiVIDUAL MESSAGE",message)
// console.log("individual message", message)
// console.log("MESSAGE IN RENDER MESSAGE",message)
if (message.msg && message.msg.text && message.msg.text.text) {
console.log("Normal MESSAGE TRIGGERED")
return <Messages key={i} text={message.msg.text.text} speaks={message.speaks} />
}
        else if (message.msg && message.msg.payload && message.msg.payload.fields && message.msg.payload.fields.signal_info && message.msg.payload.fields.signal_info.stringValue === 'store details') {
// console.log(message.msg.payload.fields)
console.log("CUSTOM MESSAGE TRIGGERED")
// info={message.msg.payload.fields.detailSchema}
// console.log(message.msg.payload.fields)
// let fieldss = message.msg.payload.fields.detailSchema.structValue.fields
let email=(message.msg.payload.fields.detailSchema.structValue.fields.request_detail_email.stringValue);
let location=(message.msg.payload.fields.detailSchema.structValue.fields.request_detail_location.stringValue);
let mob=(message.msg.payload.fields.detailSchema.structValue.fields.request_detail_mob.stringValue);
let name=(message.msg.payload.fields.detailSchema.structValue.fields.request_detail_name.stringValue);
let whatsapp=(message.msg.payload.fields.detailSchema.structValue.fields.request_detail_whatsapp.stringValue)
let text=[email,location,mob,name,whatsapp]
return <Messages key={i} type={'extra_details'} speaks={message.speaks} text={text}/>
}
        else if (message.msg && message.msg.payload && message.msg.payload.fields && message.msg.payload.fields.signal_info && message.msg.payload.fields.signal_info.stringValue === 'cards') {
console.log("MESSAGE",message)
// let width=message.msg.payload.fields.cards.listValue.length*150
// let style={
// width:
// }
// console.log("CARDS TRIGGERED")
// return <div key={i}>
// <div className="card-panel grey lighten-5 z-depth-1">
// <div style={{overflow: 'hidden'}}>
// <div className="col s2">
// <a href="/" className="btn-floating btn-large waves-effect waves-light red">{message.speaks}</a>
// </div>
// <div style={{ overflow: 'auto', overflowY: 'scroll'}}>
// <div style={{ height: 300, width:message.msg.payload.fields.cards.listValue.values.length * 270}}>
// {this.renderCards(message.msg.payload.fields.cards.listValue.values)}
// </div>
// </div>
// </div>
// </div>
// </div>
return <div key={i}>
<div className="card-panel">
<div style={{ display: 'inline-block', width: '14%' }} >
<img src={require('../../images/chatbot.png')} alt="botHead" style={{ width: 32, height: 37 }} className="circle responsive-img"></img>
</div>
<div className="cards-section">
<div className="cards-inner-section" style={{width:message.msg.payload.fields.cards.listValue.values.length*120}}>
{this.renderCards(message.msg.payload.fields.cards.listValue.values)}
</div>
</div>
</div>
</div>
}
        else if (message.msg && message.msg.payload && message.msg.payload.fields && message.msg.payload.fields.quick_replies) {
return <QuickReplies
text={message.msg.payload.fields.text ? message.msg.payload.fields.text : null}
key={i}
replyClick={this._handleQuickReplyPayload}
speaks={message.speaks}
payload={message.msg.payload.fields.quick_replies.listValue.values}
/>
}
else{
console.log("nothing triggered")
}
}
_handleInputKeyPress(e) {
if (e.key === 'Enter') {
this.df_text_query(e.target.value)
e.target.value = ''
}
}
componentDidMount() {
this.df_event_query('Welcome')
}
componentDidUpdate() {
this.messagesEnd.scrollIntoView({ behavior: 'smooth' });
if (this.talkInput) {
this.talkInput.focus()
}
}
render() {
let showBot = (
<div className="chat-bot-icon-parent">
<div onClick={this.showBot}>
<img alt="chat-bot-icon" src={require("../../images/companyicon.png")} className="chatboticon" />
</div>
<div ref={el => {
this.messagesEnd = el;
}} style={{ float: "left", clear: "both" }}
/>
</div>
);
if (this.state.showBot === true) {
showBot = (
<div className="main-container">
<div className="chat-window">
<div className="header">
{/* <img src={require('../../images/companylogo.png')} alt="companylogo" className="chat-bot-header-logo" /> */}
<p className="chat-bot-header-text">Pet Bot</p>
</div>
<div className="message-body" id="message-body">
{this.renderMessages(this.state.messages)}
{this.state.loader === true ? (
<div
style={{ overflow: "hidden", width: "100%", marginTop: 10 }}
>
<div style={{ display: "inline-block", width: "14%" }}>
<img
src={require('../../images/chatbot.png')}
alt="botHead"
style={{ width: 32, height: 37 }}
className="circle responsive-img"
></img>
</div>
<div
className="left-message-parent "
style={{ display: "inline-block", width: "70%" }}
>
<div
className="left-message card-panel z-depth-1"
style={{
backgroundColor: "#eeeef1",
padding: 5,
borderRadius: 25,
width: "fit-content",
clear: "both",
position: "relative",
borderColor: "#ddd",
borderWidth: 2,
borderStyle: "solid",
minWidth: 49
}}
>
<div
className="row valign-wrapper"
style={{ marginBottom: 0 }}
>
<div className="col s10" style={{ paddingRight: 15 }}>
<div
className="black-text"
style={{ padding: 5, fontSize: 14 }}
>
<Loading />
</div>
</div>
</div>
</div>
</div>
</div>
) : null}
<div ref={el => {
this.messagesEnd = el;
}} style={{ float: "left", clear: "both",marginTop:10}}
/>
</div>
<div className="end-div">
<div className="brand-reference-div">
<div className="end-footer">
<div>
<a target="_blank" rel="noopener noreferrer" style={{ color: "#ffffff", textDecoration: 'none', paddingTop: '6px',fontSize:'12px', paddingBottom: '6px' }} href="https://cogniaim.com/">
Powered by CogniAIm
</a>
</div>
</div>
</div>
<div className="chatInput">
<input id="message-input" placeholder="Send a Message" type="text" onKeyPress={this._handleInputKeyPress} autoComplete="off" />
</div>
</div>
</div>
<div className="chat-bot-icon-parent">
<div onClick={this.hideBot}>
<img alt="chat-bot-icon" src={require("../../images/companyicon.png")} className="chatboticon" />
</div>
</div>
</div>
)
return showBot
}
return showBot
}
}
export default Chatbot<file_sep>const mongoose=require('mongoose');
const {Schema}=mongoose;
const userSchema=new Schema({
name:String,
mobile:String,
address:String,
registerDate:Date
})
mongoose.model('user',userSchema)
ee629df7fece9c8f9db5ff6f8f1f1da1eeb76466 | ["JavaScript"] | 9 | JavaScript | atomgupta/petStoreBot | 5e9116c48a8aadbeaf0fef97e93e9ac4d0fa198c | 04b353732f4b410e5811a8756a6e13c4d308c9d7 | refs/heads/master
<file_sep>const mongoose = require('mongoose');
let rawCovidData = require('./covidFeaturesYelp.js');
let rawRestaurantData = require('./restaurantDataYelp.js');
// console.log('restaurant data: ', rawRestaurantData.restaurantData.length)
// console.log('covid data: ', rawCovidData.covidData.length)
mongoose.connect('mongodb://localhost/covidData', { useUnifiedTopology: true, useNewUrlParser: true, useCreateIndex: true });
// restaurant data: {"business_id":"f9NumwFMBDn751xgFiRbNA","name":"The Range At Lake Norman","address":"10913 Bailey Rd","city":"Cornelius","state":"NC","postal_code":"28031","latitude":35.4627242,"longitude":-80.8526119,"stars":3.5,"review_count":36,"is_open":1,"attributes":{"BusinessAcceptsCreditCards":"True","BikeParking":"True","GoodForKids":"False","BusinessParking":"{'garage': False, 'street': False, 'validated': False, 'lot': True, 'valet': False}","ByAppointmentOnly":"False","RestaurantsPriceRange2":"3"},"categories":"Active Life, Gun\/Rifle Ranges, Guns & Ammo, Shopping","hours":{"Monday":"10:0-18:0","Tuesday":"11:0-20:0","Wednesday":"10:0-18:0","Thursday":"11:0-20:0","Friday":"11:0-20:0","Saturday":"11:0-20:0","Sunday":"13:0-18:0"}},
// covid data: {"business_id":"9kXRUIkwdDtnAPO6tVo51g","highlights":"FALSE","delivery or takeout":"FALSE","Grubhub enabled":"FALSE","Call To Action enabled":"FALSE","Request a Quote Enabled":"FALSE","Covid Banner":"FALSE","Temporary Closed Until":"FALSE","Virtual Services Offered":"FALSE"},
const db = mongoose.connection;
db.on('error', console.error.bind(console, 'connection error:'));
db.once('open', function() {
// we're connected!
const covidDataSchema = new mongoose.Schema({
business_id: String,
highlights: String,
delivery_or_takeout: String,
Grubhub_enabled: String,
Call_To_Action_enabled: String,
Request_a_Quote_Enabled: String,
Covid_Banner: String,
Temporary_Closed_Until: String,
Virtual_Services_Offered: String
});
const CovidData = mongoose.model('CovidData', covidDataSchema);
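  // Map each raw covid record onto the schema (renaming keys that contain spaces)
  // and save it as its own document.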
for (let i = 0; i < rawCovidData.covidData.length; i++) {
// console.log(rawCovidData.covidData[i]['business_id'])
let restaurantCovidData = new CovidData({
business_id: rawCovidData.covidData[i]['business_id'],
highlights: rawCovidData.covidData[i]["highlights"],
delivery_or_takeout: rawCovidData.covidData[i]["delivery or takeout"],
Grubhub_enabled: rawCovidData.covidData[i]["Grubhub enabled"],
Call_To_Action_enabled: rawCovidData.covidData[i]["Call To Action enabled"],
Request_a_Quote_Enabled: rawCovidData.covidData[i]["Request a Quote Enabled"],
Covid_Banner: rawCovidData.covidData[i]["Covid Banner"],
Temporary_Closed_Until: rawCovidData.covidData[i]["Temporary Closed Until"],
Virtual_Services_Offered: rawCovidData.covidData[i]["Virtual Services Offered"]
});
restaurantCovidData.save(function (err, restaurantCovidData) {
if (err) {
return console.error(err);
} else {
console.log(i)
}
});
}
});
<file_sep>let test = 'test';
let anotherTest = 'teststses'<file_sep>const mongoose = require('mongoose');
let rawRestaurantData = require('./restaurantDataYelp.js');
console.log('restaurant data: ', rawRestaurantData.restaurantData.length)
mongoose.connect('mongodb://localhost/restaurantData', { useUnifiedTopology: true, useNewUrlParser: true, useCreateIndex: true });
// restaurant data: {"business_id":"f9NumwFMBDn751xgFiRbNA","name":"The Range At Lake Norman","address":"10913 Bailey Rd","city":"Cornelius","state":"NC","postal_code":"28031","latitude":35.4627242,"longitude":-80.8526119,"stars":3.5,"review_count":36,"is_open":1,"attributes":{"BusinessAcceptsCreditCards":"True","BikeParking":"True","GoodForKids":"False","BusinessParking":"{'garage': False, 'street': False, 'validated': False, 'lot': True, 'valet': False}","ByAppointmentOnly":"False","RestaurantsPriceRange2":"3"},"categories":"Active Life, Gun\/Rifle Ranges, Guns & Ammo, Shopping","hours":{"Monday":"10:0-18:0","Tuesday":"11:0-20:0","Wednesday":"10:0-18:0","Thursday":"11:0-20:0","Friday":"11:0-20:0","Saturday":"11:0-20:0","Sunday":"13:0-18:0"}},
const db = mongoose.connection;
db.on('error', console.error.bind(console, 'connection error:'));
db.once('open', function() {
// we're connected!
  // Field types inferred from the sample record documented in the comment above.
  const restaurantDataSchema = new mongoose.Schema({
    business_id: String,
    name: String,
    address: String,
    city: String,
    state: String,
    postal_code: String,
    latitude: Number,
    longitude: Number,
    stars: Number,
    review_count: Number,
    is_open: Number,
    attributes: Object,
    categories: String,
    hours: Object
  });
const RestaurantData = mongoose.model('RestaurantData', restaurantDataSchema);
// for (let i = 0; i < rawCovidData.covidData.length; i++) {
// // console.log(rawCovidData.covidData[i]['business_id'])
// let restaurantCovidData = new CovidData({
// business_id: rawCovidData.covidData[i]['business_id'],
// highlights: rawCovidData.covidData[i]["highlights"],
// delivery_or_takeout: rawCovidData.covidData[i]["delivery or takeout"],
// Grubhub_enabled: rawCovidData.covidData[i]["Grubhub enabled"],
// Call_To_Action_enabled: rawCovidData.covidData[i]["Call To Action enabled"],
// Request_a_Quote_Enabled: rawCovidData.covidData[i]["Request a Quote Enabled"],
// Covid_Banner: rawCovidData.covidData[i]["Covid Banner"],
// Temporary_Closed_Until: rawCovidData.covidData[i]["Temporary Closed Until"],
// Virtual_Services_Offered: rawCovidData.covidData[i]["Virtual Services Offered"]
// });
// restaurantCovidData.save(function (err, restaurantCovidData) {
// if (err) {
// return console.error(err);
// } else {
// console.log(i)
// }
// });
// }
});
<file_sep># covidSafeMap
Welcome to the map of restaurants that are taking safety precautions for the Covid-19 pandemic.
making sure this works
<file_sep>var testData = require('./covidFeaturesYelp.js')
console.log(testData.covidData.length)
01c5e255a64c38fc464e460a3fc0a29f0dbba86a | ["JavaScript", "Markdown"] | 5 | JavaScript | phamner/covidSafeMap | 3772e8ab275cea98a16fdec4040758778ad45dc8 | 853cffeff7c3389609e4abd87bda46c6d39165bd | refs/heads/master
<repo_name>gaborcsardi/datastore<file_sep>/R/datastore-package.r
#' A key-value data store with pluggable backends
#'
#' This package implements a simple key-value data store with pluggable
#' backends. The goal is to allow collecting all associated data for a
#' project at one place, and have access to it in a uniform way.
#'
#' Data stores can be shared across projects and users as well,
#' and are (of course) kept between R sessions.
#'
#' The package comes with a \sQuote{file} backend, a data store that
#' simply stores each value in a file, named according to its key. Other
#' backends will be added as separate R packages.
#'
#' @name Datastore package
#' @docType package
#' @importFrom magrittr %>%
NULL
<file_sep>/man/Datastore-package.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{Datastore package}
\alias{Datastore package}
\alias{Datastore package-package}
\title{A key-value data store with pluggable backends}
\description{
This package implements a simple key-value data store with pluggable
backends. The goal is to allow collecting all associated data for a
project at one place, and have access to it in a uniform way.
}
\details{
Data stores can be shared across projects and users as well,
and are (of course) kept between R sessions.
The package comes with a \sQuote{file} backend, a data store that
simply stores each value in a file, named according to its key. Other
backends will be added as separate R packages.
}
<file_sep>/R/store-file.R
## ---------------------------------------------------------------------
#' File data store
#'
#' It stores each piece of data in a separate file, and the file name is
#' the key.
#'
#' @include store.R
#' @importFrom R6 R6Class
#' @importFrom digest digest
#' @importFrom rappdirs user_data_dir
#' @export
file_data_store <- R6Class("file_data_store",
inherit = abstract_data_store,
public = list(
portable = TRUE,
initialize = function(name, create = FALSE, overwrite = FALSE,
data_dir = file.path(user_data_dir(appname()), name)) {
fds_create(self, private, name, create, overwrite, data_dir)
},
store = function(..., key = digest(list(...)), overwrite = FALSE,
envir = parent.frame()) {
fds_store(.self = self, .private = private, .key = key,
.overwrite = overwrite, .envir = envir, ...)
},
retrieve = function(key) { fds_retrieve(self, key) },
exists = function(key) { fds_exists(self, private, key) },
load = function(key, envir = parent.frame()) {
fds_load(self, private, key, envir = envir)
},
keys = function() { fds_keys(self, private) },
delete = function(key) { fds_delete(self, private, key) },
destroy = function() { fds_destroy(self, private) }
## try_load and clean are inherited
),
private = list(
data_dir = NA,
get_filename = function(key, extension = ".rda") {
file.path(private$data_dir, self$name, key) %>%
paste0(extension)
},
store_exists = function() {
assert_that(is.dir(private$data_dir))
},
store_writeable = function() {
assert_that(is.dir(private$data_dir))
assert_that(is.writeable(private$data_dir))
}
)
)
## ---------------------------------------------------------------------
#' @importFrom falsy %||%
#' @importFrom assertthat see_if is.dir
#' @keywords internal
fds_create <- function(self, private, name, create, overwrite, data_dir) {
assert_that(is.string(name))
assert_that(is.writeable.dir(data_dir))
self$name <- name
private$data_dir <- absolute_path(data_dir)
if (create) {
if (file.exists(data_dir) && ! overwrite) {
see_if(is.dir(data_dir)) %||%
stop("data_dir exists and it is not a directory")
is_empty_dir(data_dir) %||%
stop("data_dir exists and is not empty")
}
dir_create_if_missing(data_dir)
clean_dir(data_dir)
}
self
}
## ---------------------------------------------------------------------
#' @importFrom assertthat assert_that is.string is.flag
#' @importFrom falsy %||% nay
#' @keywords internal
fds_store <- function(.self, .private, .key, .overwrite, .envir, ...) {
assert_that(is.string(.key))
assert_that(is.flag(.overwrite))
assert_that(.private$store_writeable())
file_name <- .private$get_filename(.key)
.overwrite %||% nay(file.exists(file_name)) %||% stop("key already exists")
file_name %>%
dirname() %>%
dir_create_if_missing()
safe_save(..., file_name = file_name, envir = .envir)
.self
}
## ---------------------------------------------------------------------
#' @importFrom assertthat assert_that is.string
#' @importFrom falsy %||%
#' @keywords internal
fds_retrieve <- function(self, key) {
## Load checks that the DS exists
my_env <- new.env()
self$load(key, envir = my_env)
as.list(my_env)
}
## ---------------------------------------------------------------------
#' @importFrom assertthat assert_that is.string
#' @keywords internal
fds_exists <- function(self, private, key) {
assert_that(is.string(key))
assert_that(private$store_exists())
private$get_filename(key) %>%
file.exists()
}
#' @keywords internal
fds_load <- function(self, private, key, envir = parent.frame()) {
## This also checks that the DS exists
self$exists(key) %||% stop("key does not exist")
private$get_filename(key) %>%
load(envir = envir)
self
}
#' @keywords internal
fds_keys <- function(self, private) {
assert_that(private$store_exists())
file.path(private$data_dir, self$name) %>%
dir(pattern = "\\.rda", all.files = TRUE, no.. = TRUE) %>%
sub(pattern = "\\.rda", replacement = "")
}
#' @keywords internal
fds_delete <- function(self, private, key) {
## Also checks that the DS exists
self$exists(key) %||% stop("key does not exist")
private$get_filename(key) %>%
unlink(recursive = TRUE)
self
}
#' @keywords internal
fds_destroy <- function(self, private) {
assert_that(private$store_exists())
private$data_dir %>%
unlink(recursive = TRUE)
}
<file_sep>/man/file_data_store.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{file_data_store}
\alias{file_data_store}
\title{File data store}
\format{\preformatted{Class 'R6ClassGenerator' <environment: 0x105efb628>
- attr(*, "name")= chr "file_data_store_generator"
}}
\usage{
file_data_store
}
\description{
It stores each piece of data in a separate file, and the file name is
the key.
}
\keyword{datasets}
<file_sep>/man/appname.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{appname}
\alias{appname}
\title{The name to use to create the configuration directories}
\usage{
appname()
}
\arguments{
\item{appname}{If missing, the current application name
is returned. Otherwise it must be a string and it sets the
application name.}
}
\description{
The name to use to create the configuration directories
}
<file_sep>/R/store.R
## ---------------------------------------------------------------------
#' Abstract data store
#'
#' @export
abstract_data_store <- R6Class("abstract_data_store",
public = list(
portable = TRUE,
name = NA,
## Must be implemented
initialize = function(name, create = FALSE, overwrite = FALSE, ...) {
stop("cannot create abstract data store directly")
},
store = function(..., key = digest(list(...)), overwrite = FALSE,
envir = parent.frame()) {
stop("store method not implemented")
},
retrieve = function(key) { stop("retrieve method not implemented") },
try_load = function(key, envir = parent.frame()) {
ads_try_load(self, key, envir)
},
clean = function() { ads_clean(self) },
destroy = function() { ads_destroy(self) },
exists = function(key) { ads_exists(self, key) },
## Optional
load = function(key, envir = parent.frame()) {
stop("not implemented")
},
keys = function() { stop("keys method not implemented") },
delete = function(key) { stop("delete method not implemented") }
)
)
## ---------------------------------------------------------------------
#' @keywords internal
ads_try_load <- function(self, key, envir) {
try(self$load(key, envir = envir), silent = TRUE)
}
## ---------------------------------------------------------------------
#' @keywords internal
ads_clean <- function(self) {
self$keys() %>%
sapply(self$delete)
self
}
## ---------------------------------------------------------------------
#' @keywords internal
ads_destroy <- function(self) {
self$clean()
}
## ---------------------------------------------------------------------
#' @keywords internal
ads_exists <- function(self, key) {
  ## TRUE when retrieve() succeeds, FALSE when it errors (i.e. the key is missing)
  res <- try(self$retrieve(key), silent = TRUE)
  ! inherits(res, "try-error")
}
<file_sep>/man/data_store_builder.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{data_store_builder}
\alias{data_store_builder}
\title{Data store builder}
\format{\preformatted{Class 'R6ClassGenerator' <environment: 0x1060125f0>
- attr(*, "name")= chr "data_store_builder_generator"
}}
\usage{
data_store_builder
}
\description{
Data store builder
}
\keyword{datasets}
<file_sep>/tests/testthat/test_file.R
context("File data store")
## The tests only write to a temporary directory, so we create a
## new builder for that. This is also called `data_store` for
## simplicity, and it shadows the one in the package.
library(datastore)
dir.create(config_dir <- tempfile())
data_store <- data_store_builder$new(dir = config_dir)
make_id <- function(length = 6) {
id <- sample(c(letters, LETTERS, 0:9), length, replace=TRUE)
paste(id, collapse="")
}
test_that("Creating a file data store", {
## Create data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
expect_that(data_store$ls(), equals(name))
expect_that(db$keys(), equals(character()))
## Now destroy it
data_store$destroy(name = name)
expect_that(data_store$ls(), equals(character()))
})
test_that("Storing data in a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
expect_that(db$keys(), equals("norm10"))
## And retrieve it
mydata2 <- db$retrieve("norm10")$mydata
expect_that(mydata2, is_identical_to(mydata))
## Try overwriting it
mydata <- rnorm(10, mean=10)
expect_that(db$store(key = "norm10", mydata),
throws_error("key already exists"))
## But we can insist
db$store(key = "norm10", mydata, overwrite = TRUE)
expect_that(db$keys(), equals("norm10"))
mydata2 <- db$retrieve("norm10")$mydata
  expect_that(mydata2, is_identical_to(mydata))
## Chaining
mydata2 <- rnorm(10)
db$clean()
db$store(key = "norm1", mydata)$
store(key = "norm2", mydata2)
expect_that(db$retrieve("norm1")$mydata,
is_identical_to(mydata))
expect_that(db$retrieve("norm2")$mydata2,
is_identical_to(mydata2))
## Destroy data store
data_store$destroy(name)
})
test_that("Retrieving data from a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## And retrieve it
mydata2 <- db$retrieve("norm10")$mydata
expect_that(mydata2, is_identical_to(mydata))
## Retrieve something that is not there
expect_that(db$retrieve("norm1"),
throws_error("key does not exist"))
## Destroy data store
data_store$destroy(name)
})
test_that("Checking if a key exists in a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## Check if it is there
expect_that(db$exists("norm10"), is_true())
## This is not there
expect_that(db$exists("norm1"), is_false())
## Destroy data store
data_store$destroy(name)
})
test_that("Loading data from a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## Load it into the specified environment
my_env <- new.env()
db$load("norm10", envir = my_env)
expect_that(ls(my_env), equals("mydata"))
expect_that(my_env$mydata, is_identical_to(mydata))
## Load into the current environment
mydata2 <- mydata
rm(mydata)
db$load("norm10")
expect_that(mydata, is_identical_to(mydata2))
## Load something that is not there
expect_that(db$load("norm1"),
throws_error("key does not exist"))
## Destroy data store
data_store$destroy(name)
})
test_that("Trying to load data from a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## Try loading it into the specified environment
my_env <- new.env()
res <- db$try_load("norm10", envir = my_env)
expect_that(falsy::is_truthy(res), is_true())
expect_that(ls(my_env), equals("mydata"))
expect_that(my_env$mydata, is_identical_to(mydata))
## Try loading into the current environment
mydata2 <- mydata
rm(mydata)
expect_that(falsy::is_truthy(db$try_load("norm10")), is_true())
db$try_load("norm10")
expect_that(mydata, is_identical_to(mydata2))
## Try load something that is not there
expect_that(falsy::is_falsy(db$try_load("norm1")), is_true())
## Destroy data store
data_store$destroy(name)
})
test_that("Listing keys in a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Should be empty
expect_that(db$keys(), equals(character()))
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## Now it is not empty
expect_that(db$keys(), equals("norm10"))
## Delete the data
db$delete("norm10")
## Empty again
expect_that(db$keys(), equals(character()))
## Destroy data store
data_store$destroy(name)
})
test_that("Deleting a key from a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Store some data in it
mydata <- rnorm(10)
db$store(key = "norm10", mydata)
## Delete the data
db$delete("norm10")
## Chaining
mydata2 <- rnorm(10)
db$store(key = "norm1", mydata)$
store(key = "norm2", mydata2)
db$delete("norm1")$
delete("norm2")
## Empty now
expect_that(db$keys(), equals(character()))
## Destroy data store
data_store$destroy(name)
})
test_that("Cleaning a file datastore", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Clean an empty data store
db$clean()
expect_that(db$keys(), equals(character()))
## Store some data in it
mydata <- rnorm(10)
mydata2 <- rnorm(10)
db$store(key = "norm1", mydata)$
store(key = "norm2", mydata2)
## Clean it
db$clean()
## Empty now
expect_that(db$keys(), equals(character()))
## Destroy data store
data_store$destroy(name)
})
test_that("Destroying a file data store", {
## Create a data store
name <- make_id()
db <- data_store$create(type = "file", name = name,
data_dir = tempfile())
## Destroy data store
data_store$destroy(name)
## Does not exist any more
expect_that(data_store$ls(), equals(character()))
## Operations on db give error message
expect_that(db$store("foobar"), throws_error("does not exist"))
expect_that(db$retrieve("foobar"), throws_error("does not exist"))
expect_that(db$exists("foobar"), throws_error("does not exist"))
expect_that(db$load("foobar"), throws_error("does not exist"))
expect_that(db$keys(), throws_error("does not exist"))
expect_that(db$delete("foobar"), throws_error("does not exist"))
expect_that(db$destroy(), throws_error("does not exist"))
expect_that(db$clean(), throws_error("does not exist"))
## How about try_load? This should fail, too, but we cannot
## test it currently, because nested try's are awkward.
expect_that(db$try_load("foobar"), throws_error("does not exist"))
})
<file_sep>/R/assert.R
#' Check for writeable directories
#'
#' If the directory exists, then it checks whether it is
#' writable by the user. If it does not exist, it checks
#' whether the user can create it, by checking that its parent
#' directory exists and is writable by the user.
#'
#' @param path The path to check.
#'
#' @importFrom assertthat is.dir is.writeable on_failure<-
#' @keywords internal
#' @rdname datastore_utils
is.writeable.dir <- function(path) {
((file.exists(path) && is.dir(path) && is.writeable(path)) ||
(!file.exists(path) && is.dir(dirname(path)) &&
is.writeable(dirname(path))))
}
on_failure(is.writeable.dir) <- function(call, env) {
paste0(deparse(call$x), " is not a writeable directory")
}
<file_sep>/man/data_store.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{data_store}
\alias{data_store}
\title{Default data store builder}
\format{\preformatted{Classes 'data_store_builder', 'R6' <environment: 0x105ff4508>
}}
\usage{
data_store
}
\description{
Default data store builder
}
\keyword{datasets}
<file_sep>/tests/testthat.R
library(testthat)
test_check("datastore")
<file_sep>/tests/testthat/test_builder.R
context("Builder")
## The tests only write to a temporary directory, so we create a
## new builder for that. This is also called `data_store` for
## simplicity, and it shadows the one in the package.
library(datastore)
dir.create(config_dir <- tempfile())
data_store <- data_store_builder$new(dir = config_dir)
test_that("Listing data stores", {
## Initially empty
expect_that(data_store$ls(), equals(character()))
## Create data stores, so that we have them in the list
db <- data_store$create(name = "foobar",
data_dir = file.path(config_dir, "foobar"))
expect_that(data_store$ls(), equals("foobar"))
db <- data_store$create(name = "foo",
data_dir = file.path(config_dir, "foo"))
expect_that(data_store$ls(), equals(c("foo", "foobar")))
})
test_that("Creating data stores", {
## We already have two, try overwriting
expect_that(data_store$create(name = "foobar"),
throws_error("already exists"))
## But we can insist
data_store$create(name = "foobar", overwrite_config = TRUE,
data_dir = file.path(config_dir, "foobar2"))
## Try overwriting the store as well, data dir empty, so all is well
data_store$create(name = "foobar", overwrite_config = TRUE,
data_dir = file.path(config_dir, "foobar"))
## Create a file in data_dir, so it will fail
cat("hello", file = file.path(config_dir, "foobar", "mydata"))
expect_that(data_store$create(name = "foobar", overwrite_config = TRUE,
data_dir = file.path(config_dir, "foobar")),
throws_error("data_dir exists and is not empty"))
## But we can overwrite and empty it is requested
data_store$create(name = "foobar", overwrite_config = TRUE,
overwrite_data = TRUE,
data_dir = file.path(config_dir, "foobar"))
expect_that(is_empty_dir(file.path(config_dir, "foobar")), is_true())
})
test_that("Setup existing data store", {
## This was created above, connect to it
db <- data_store$setup(type = "file", name = "bark",
data_dir = file.path(config_dir, "foobar2"))
expect_that("bark" %in% data_store$ls(), is_true())
## If already set up, then error
expect_that(data_store$setup(type = "file", name = "bark",
data_dir = file.path(config_dir, "foobar2")),
throws_error("already exists"))
## But we can overwrite if requested
db2 <- data_store$setup(type = "file", name = "bark", overwrite = TRUE,
data_dir = file.path(config_dir, "foobar"))
data_dir <- environment(db2$store)$private$data_dir
expect_that(data_dir, equals(file.path(config_dir, "foobar")))
})
test_that("Connect to data store", {
## Connect to a previous data store
db <- data_store$connect("foobar")
expect_that(inherits(db, "file_data_store"), is_true())
## If it does not exist, need to improve error message
expect_that(data_store$connect("bar"), throws_error())
})
test_that("Destroying a data store", {
})
test_that("Forgetting a data store", {
})
<file_sep>/man/abstract_data_store.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{abstract_data_store}
\alias{abstract_data_store}
\title{Abstract data store}
\format{\preformatted{Class 'R6ClassGenerator' <environment: 0x105f901c0>
- attr(*, "name")= chr "abstract_data_store_generator"
}}
\usage{
abstract_data_store
}
\description{
Abstract data store
}
\keyword{datasets}
<file_sep>/Makefile
all: README.md
README.md: vignettes/README.Rmd
cd vignettes && \
Rscript -e "library(knitr); knit('README.Rmd', quiet = TRUE)"
cat vignettes/README.md | \
awk ' BEGIN { n=0; } \
/^-->$$/ { n++; } \
{ if (n >= 1) print; }' | tail +2 > README.md
rm vignettes/README.md
<file_sep>/R/utils.R
#' The name to use to create the configuration directories
#'
#' @param appname If missing, the current application name
#' is returned. Otherwise it must be a string and it sets the
#' application name.
#'
#' @importFrom assertthat assert_that is.string
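#'
#' @examples
#' \dontrun{
#' ## Illustrative sketch: query, then set, the application name
#' appname()
#' appname("myproject")
#' appname()
#' }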
#'
#' @export
appname <- function() {
name <- "R-datastore"
function(appname) {
if (missing(appname)) {
name
} else {
assert_that(is.string(appname))
name <<- appname
}
}
}()
dir_create_if_missing <- function(path, recursive = FALSE) {
if (! file.exists(path)) {
dir.create(path, recursive = recursive)
}
}
clean_dir <- function(path) {
path %>%
list.files(all.files = TRUE, no.. = TRUE, full.names = TRUE) %>%
unlink(recursive = TRUE)
}
is_empty_dir <- function(path) {
path %>%
dir(all.files = TRUE, no.. = TRUE) %>%
length() %>%
magrittr::equals(0)
}
#' @importFrom assertthat assert_that is.string
absolute_path <- function(path) {
assert_that(is.string(path))
path <- path.expand(path)
if (substr(path, 1, 1) == "/") {
path
} else {
file.path(getwd(), path)
}
}
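## Write to a temporary file in the target directory first, then rename it into
## place, so a partially written file never clobbers an existing value.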
safe_save <- function(..., file_name, envir) {
dir <- dirname(file_name)
base <- basename(file_name)
tmp_file <- tempfile(paste0(base, "-", Sys.getpid(), "-"),
tmpdir = dir)
on.exit(suppressWarnings(file.remove(tmp_file)), add = TRUE)
save(..., file = tmp_file, envir = envir)
file.rename(tmp_file, file_name)
}
<file_sep>/man/datastore_utils.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{is.writeable.dir}
\alias{is.writeable.dir}
\title{Check for writeable directories}
\usage{
is.writeable.dir(path)
}
\arguments{
\item{path}{The path to check.}
}
\description{
If the directory exists, then it checks whether it is
writable by the user. If it does not exist, it checks
whether the user can create it, by checking that its parent
directory exists and is writable by the user.
}
\keyword{internal}
<file_sep>/README.md
# Datastore - introduction
[](https://travis-ci.org/gaborcsardi/datastore)
[](https://ci.appveyor.com/project/gaborcsardi/datastore)
[](http://cran.rstudio.com/web/packages/datastore/index.html)
datastore is an R package that creates project-specific key-value
data stores to hold arbitrary R objects. Data stores are persistent
across R sessions and can be shared among users.
## Installation
```r
library(devtools)
install_github("hadley/rappdirs")
install_github("gabocsardi/falsy")
install_github("gaborcsardi/datastore")
```
```r
library(datastore)
library(falsy)
```
## Usage
### Creating data stores
Data store configurations are managed by _data store builders_.
The default builder in the package (called `data_store`)
keeps config data in the standard
[application directories](https://github.com/hadley/rappdirs).
In the examples below, we create another builder that uses the R session's
temporary directory for all configuration files. For real data stores you
will want to skip this step, and just use the default builder.
```r
dir.create(config_dir <- tempfile())
data_store <- data_store_builder$new(dir = config_dir)
```
The `data_store` builder can create, list and destroy
data stores. Right now the list of data stores is empty:
```r
data_store$ls()
## character(0)
```
Now we will create a data store named `acme`. It will use the `file`
backend. It will use `data_dir` for storing the actual data.
For real projects a good choice for the data location is the
[_user data directory_](https://github.com/hadley/rappdirs) of the
project. For data stores shared among users, another
location can be specified.
```r
dir.create(data_dir <- tempfile())
db <- data_store$create(type = "file", name = "acme", data_dir = data_dir)
```
If everything went fine and the data directory was created,
then we now have an empty data store. `data_store$create` also returns a
handle to manipulate the newly created data store. E.g. `db$keys()` lists
all keys:
```r
data_store$ls()
## [1] "acme"
db$keys()
## character(0)
```
### Storing and retrieving data
A `file` data store can hold arbitrary R objects, and multiple objects can
be stored together, under the same key. This is analogous to using `save()`
to store R objects in a file, without having to remember where the data
lives on disk, and with a unified interface to it.
```r
my_data_1 <- rnorm(100)
my_data_2 <- runif(100)
db$store(key = "random", my_data_1, my_data_2)
```
Note that the `key` argument of `db$store` must be explicitly named,
as all unnamed arguments will be treated as data.
`db$retrieve` queries the value of a key, and returns it in a list.
Alternatively the value(s) can be also loaded into an environment
with `db$load`.
```r
db$keys()
## [1] "random"
from_db <- db$retrieve("random")
names(from_db)
## [1] "my_data_1" "my_data_2"
identical(from_db$my_data_1, my_data_1)
## [1] TRUE
```
`db$exists` tests whether a key exists, and `db$delete` deletes a key from
the data store.
```r
db$exists("random")
## [1] TRUE
```
```r
db$delete("random")
```
### Chaining operations
`db$store`, `db$load` and `db$delete` operations can be chained to store,
load or delete multiple values at once with a denser syntax:
```r
my_data_3 <- "Hello world!"
this_too <- "Hello again!"
db$store(key = "random_1", my_data_1)$
store(key = "random_2", my_data_2)$
store(key = "hello", my_data_3)$
store(key = "hello_again", this_too)
rm(my_data_1, my_data_2)
db$load("random_1")$
load("random_2")
```
```r
ls()
## [1] "config_dir" "data_dir" "data_store" "db" "from_db"
## [6] "my_data_1" "my_data_2" "my_data_3" "this_too"
```
```r
db$delete("random_1")$
delete("random_2")
rm(my_data_1, my_data_2, my_data_3, this_too)
```
### Connect to a data store
The data store builder can also _set up_ or _connect to_ an existing data
store. Configuring (or setting up) a data store means creating a
configuration file for it. This is useful for sharing data stores: if the
data files are in a common disk area or data base, then additional users
can just _setup_ these as data stores for themselves with
`data_store$setup`. After setting up a data store, it appears in the
list of data stores in the `data_store$ls()` output.
If a data store was already set up, users can _connect_ to it with
`data_store$connect`. Both `data_store$setup` and `data_store$connect`
return handles that can be used for data store manipulation.
```r
rm(db)
db <- data_store$connect("acme")
db$keys()
## [1] "hello_again" "hello"
```
### Destroying data stores
The builder can also destroy a data store. This removes its configuration
file, and also all data.
```r
data_store$destroy("acme")
data_store$ls()
## character(0)
```
## Data store backends
Where and how the data is stored depends on the data store backend
used. The package comes with the `file` backend, which stores each *value*
in a file that is named according to its *key*. Other backends are (well,
will be) available in separate R packages.
<file_sep>/man/data_store_builder_internal.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{dsb_new}
\alias{dsb_config_file}
\alias{dsb_connect}
\alias{dsb_create}
\alias{dsb_destroy}
\alias{dsb_find_backend}
\alias{dsb_forget}
\alias{dsb_get_config}
\alias{dsb_ls}
\alias{dsb_new}
\alias{dsb_setup}
\alias{dsb_store_exists}
\alias{dsb_write_config}
\title{Create a data store builder}
\usage{
dsb_new(self, private, dir)
dsb_create(self, private, type, name, overwrite_config = FALSE,
overwrite_data = FALSE, ...)
dsb_setup(self, private, type, name, overwrite, ...)
dsb_connect(self, private, name)
dsb_ls(self, private)
dsb_destroy(self, name)
dsb_forget(self, private, name)
dsb_store_exists(self, private, name)
dsb_config_file(self, private, name)
dsb_get_config(self, private, name)
dsb_find_backend(self, type)
dsb_write_config(self, private, name, overwrite = FALSE, ...)
}
\arguments{
\item{dir}{The directory of the configuration files.}
\item{type}{Data store backend type.}
\item{name}{Name of data store.}
\item{overwrite_config}{Overwrite an already existing configuration file.}
\item{overwrite_data}{Overwrite an already existing data store.}
\item{...}{Additional arguments to the \code{new} method of the
backend.}
\item{type}{Data store type.}
\item{name}{Data store name.}
\item{overwrite}{Overwrite an already existing configuration file.}
\item{...}{Additional arguments to the \code{new} method of the
backend.}
\item{name}{Data store name.}
\item{name}{Data store name.}
\item{name}{Data store name}
\item{name}{Data store name}
}
\description{
Create a data store builder
Create the actual storage, using some backend, as specified by
the \code{type} argument. Also create the configuration file for
the backend, and return a connection to it.
Create the configuration file for an already existing data store,
and return a connection to it.
Connect to data store that was already set up
These are YAML files (i.e. files with a \code{.yaml}
extension, located in our store directory.
Destroy a data store
Remove the configuration file for a data store (but keep the
data store itself intact).
Check if a data store exists
Data store configuration file location
Data store configuration contents
}
\details{
It is possible that there are other files here, e.g. if the
data dir and the config dir are the same (as on OSX), then
the data files (directories) of the "file" backend are here as
well. But they should not have a \code{.yaml} extension.
}
\keyword{internal}
<file_sep>/R/builder.R
## ---------------------------------------------------------------------
#' Data store builder
#'
#' @include utils.R
#' @importFrom rappdirs user_config_dir
#' @export
data_store_builder <- R6Class("data_store_builder",
public = list(
portable = TRUE,
initialize = function(dir = user_config_dir(appname())) {
dsb_new(self, private, dir)
},
create = function(type = "file", name, overwrite_config = FALSE,
overwrite_data = FALSE, ...) {
dsb_create(self, private, type, name,
overwrite_config = overwrite_config,
overwrite_data = overwrite_data, ...)
},
setup = function(type = "file", name, overwrite = FALSE, ...) {
dsb_setup(self, private, type, name, overwrite, ...)
},
connect = function(name) { dsb_connect(self, private, name) },
ls = function() { dsb_ls(self, private) },
destroy = function(name) { dsb_destroy(self, name) },
forget = function(name) { dsb_forget(self, private, name) }
),
private = list(
dir = NA,
store_exists = function(name) { dsb_store_exists(self, private, name) },
config_file = function(name) { dsb_config_file(self, private, name) },
get_config = function(name) { dsb_get_config(self, private, name) },
find_backend = function(type) { dsb_find_backend(self, type) },
write_config = function(name, overwrite = FALSE, ...) {
dsb_write_config(self, private, name, overwrite = overwrite, ...)
}
)
)
## ---------------------------------------------------------------------
#' Create a data store builder
#'
#' @param dir The directory of the configuration files.
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_new <- function(self, private, dir) {
private$dir <- dir
}
## ---------------------------------------------------------------------
#' Create a data store
#'
#' Create the actual storage, using some backend, as specified by
#' the \code{type} argument. Also create the configuration file for
#' the backend, and return a connection to it.
#'
#' @param type Data store backend type.
#' @param name Name of data store.
#' @param overwrite_config Overwrite an already existing configuration file.
#' @param overwrite_data Overwrite an already existing data store.
#' @param ... Additional arguments to the \code{new} method of the
#' backend.
#'
#' @importFrom assertthat assert_that is.string
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_create <- function(self, private, type, name, overwrite_config = FALSE,
overwrite_data = FALSE, ...) {
assert_that(is.string(name))
backend <- private$find_backend(type)
private$write_config(name, overwrite = overwrite_config,
type = type, ...)
backend[["new"]](name, create = TRUE, overwrite = overwrite_data, ...)
}
## ---------------------------------------------------------------------
#' Set up a data store
#'
#' Create the configuration file for an already existing data store,
#' and return a connection to it.
#'
#' @param type Data store type.
#' @param name Data store name.
#' @param overwrite Overwrite an already existing configuration file.
#' @param ... Additional arguments to the \code{new} method of the
#' backend.
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_setup <- function(self, private, type, name, overwrite, ...) {
assert_that(is.string(name))
backend <- private$find_backend(type)
private$write_config(name, overwrite = overwrite, type = type, ...)
backend[["new"]](name, create = FALSE, overwrite = FALSE, ...)
}
## ---------------------------------------------------------------------
#' Connect to data store that was already set up
#'
#' @param name Data store name.
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_connect <- function(self, private, name) {
assert_that(private$store_exists(name))
config <- private$get_config(name)[[name]]
args <- config %>%
modifyList(list(type = NULL, name = name, create = FALSE,
overwrite = FALSE))
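  ## Look up the backend class from the saved "type" and call its $new() with the
  ## stored configuration (minus the type field), reconnecting without recreating data.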
config[["type"]] %>%
private$find_backend() %>%
`[[`("new") %>%
do.call(args)
}
## ---------------------------------------------------------------------
#' List data stores
#'
#' These are YAML files (i.e. files with a \code{.yaml}
#' extension, located in our store directory.
#'
#' It is possible that there are other files here, e.g. if the
#' data dir and the config dir are the same (as on OSX), then
#' the data files (directories) of the "file" backend are here as
#' well. But they should not have a \code{.yaml} extension.
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_ls <- function(self, private) {
list.files(private$dir, pattern="\\.yaml$") %>%
sub(pattern = "\\.yaml$", replacement = "")
}
## ---------------------------------------------------------------------
#' Destroy a data store
#'
#' @param name Data store name.
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_destroy <- function(self, name) {
self$connect(name)$destroy()
self$forget(name)
}
## ---------------------------------------------------------------------
#' Forget a data store
#'
#' Remove the configuration file for a data store (but keep the
#' data store itself intact).
#'
#' @param name Data store name
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_forget <- function(self, private, name) {
name %>%
private$config_file() %>%
unlink()
}
## ---------------------------------------------------------------------
#' Check if a data store exists
#'
#' @importFrom assertthat assert_that is.string
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_store_exists <- function(self, private, name) {
assert_that(is.string(name))
name %>%
    private$config_file() %>%
file.exists()
}
## ---------------------------------------------------------------------
#' Data store configuration file location
#'
#' @param name Data store name
#'
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_config_file <- function(self, private, name) {
file.path(private$dir, paste0(name, ".yaml"))
}
## ---------------------------------------------------------------------
#' Data store configuration contents
#'
#' @importFrom assertthat assert_that
#' @importFrom yaml yaml.load_file
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_get_config <- function(self, private, name) {
assert_that(private$store_exists(name))
name %>%
private$config_file() %>%
yaml.load_file()
}
#' @importFrom assertthat assert_that is.string
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_find_backend <- function(self, type) {
if (inherits(type, "abstract_data_store")) { return(type) }
assert_that(is.string(type))
backend_name <- paste0(type, "_data_store")
if (!exists(backend_name)) { stop("Unknown data store backend ", type) }
get(backend_name)
}
#' @importFrom assertthat assert_that is.string
#' @importFrom magrittr set_names
#' @importFrom yaml as.yaml
#' @keywords internal
#' @rdname data_store_builder_internal
dsb_write_config <- function(self, private, name, overwrite = FALSE, ...) {
assert_that(is.string(name))
config_file <- private$config_file(name)
if (file.exists(config_file) && ! overwrite) {
stop("Configuration file for ", name,
" already exists, see the 'overwrite' argument")
}
list(...) %>%
list() %>%
set_names(name) %>%
as.yaml() %>%
cat("\n", file = config_file)
}
## ---------------------------------------------------------------------
#' Default data store builder
#'
#' @export
data_store <- data_store_builder$new()
<file_sep>/vignettes/README.Rmd
---
title: "The datastore package"
output:
html_document:
theme: null
css: style.css
toc: yes
---
<!--
%\VignetteEngine{knitr::rmarkdown}
%\VignetteIndexEntry{Datastore - introduction}
-->
```{r, setup, echo = FALSE, message = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "##",
error = TRUE,
tidy = FALSE,
fig.width = 8,
fig.height = 8)
```
# Datastore - introduction
[](https://travis-ci.org/gaborcsardi/datastore)
[](https://ci.appveyor.com/project/gaborcsardi/datastore)
datastore is an R package that creates project-specific key-value
data stores to hold arbitrary R objects. Data stores are persistent
across R sessions and can be shared among users.
## Installation
```{r, eval = FALSE}
library(devtools)
install_github("hadley/rappdirs")
install_github("gabocsardi/falsy")
install_github("gaborcsardi/datastore")
```
```{r}
library(datastore)
library(falsy)
```
## Usage
### Creating data stores
Data store configurations are managed by _data store builders_.
The default builder in the package (called `data_store`)
keeps config data in the standard
[application directories](https://github.com/hadley/rappdirs).
In the examples below, we create another builder that uses the R session's
temporary directory for all configuration files. For real data stores you
will want to skip this step, and just use the default builder.
```{r}
dir.create(config_dir <- tempfile())
data_store <- data_store_builder$new(dir = config_dir)
```
The `data_store` builder can create, list and destroy
data stores. Right now the list of data stores is empty:
```{r}
data_store$ls()
```
Now we will create a data store named `acme`. It will use the `file`
backend. It will use `data_dir` for storing the actual data.
For real projects a good choice for the data location is the
[_user data directory_](https://github.com/hadley/rappdirs) of the
project. For data stores shared among users, another
location can be specified.
```{r, results = 'hide'}
dir.create(data_dir <- tempfile())
db <- data_store$create(type = "file", name = "acme", data_dir = data_dir)
```
If everything went fine and the data directory was created,
then we now have an empty data store. `data_store$create` also returns a
handle to manipulate the newly created data store. E.g. `db$keys()` lists
all keys:
```{r}
data_store$ls()
db$keys()
```
### Storing and retrieving data
A `file` data store can hold arbitrary R objects, and multiple objects can
be stored together, under the same key. This is analogous to using `save()`
to store R objects in a file, without having to remember where the data
lives on disk, and with a unified interface to it.
```{r, results = 'hide'}
my_data_1 <- rnorm(100)
my_data_2 <- runif(100)
db$store(key = "random", my_data_1, my_data_2)
```
Note that the `key` argument of `db$store` must be explicitly named,
as all unnamed arguments will be treated as data.
`db$retrieve` queries the value of a key, and returns it in a list.
Alternatively, the value(s) can also be loaded into an environment
with `db$load`.
```{r}
db$keys()
from_db <- db$retrieve("random")
names(from_db)
identical(from_db$my_data_1, my_data_1)
```
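As a quick illustration, `db$load` places the stored value(s) directly into
the current environment (a minimal sketch; the chaining example below relies
on the same behaviour):
```{r, results = 'hide'}
rm(my_data_2)
db$load("random")
```
```{r}
exists("my_data_2")
```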
`db$exists` tests whether a key exists, and `db$delete` deletes a key from
the data store.
```{r}
db$exists("random")
```
```{r, results='hide'}
db$delete("random")
```
### Chaining operations
`db$store`, `db$load` and `db$delete` operations can be chained to store,
load or delete multiple values at once with a denser syntax:
```{r, results = 'hide'}
my_data_3 <- "Hello world!"
this_too <- "Hello again!"
db$store(key = "random_1", my_data_1)$
store(key = "random_2", my_data_2)$
store(key = "hello", my_data_3)$
store(key = "hello_again", this_too)
rm(my_data_1, my_data_2)
db$load("random_1")$
load("random_2")
```
```{r}
ls()
```
```{r, results = 'hide'}
db$delete("random_1")$
delete("random_2")
rm(my_data_1, my_data_2, my_data_3, this_too)
```
### Connect to a data store
The data store builder can also _set up_ or _connect to_ an existing data
store. Configuring (or setting up) a data store means creating a
configuration file for it. This is useful for sharing data stores: if the
data files are in a common disk area or database, then additional users
can just _set up_ these as data stores for themselves with
`data_store$setup`. After setting up a data store, it appears in the
list of data stores in the `data_store$ls()` output.
If a data store was already set up, users can _connect_ to it with
`data_store$connect`. Both `data_store$setup` and `data_store$connect`
return handles that can be used for data store manipulation.
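For instance, a second user pointing at the same data files might run
something like the following (a sketch only; the exact arguments of
`data_store$setup` are assumed here to mirror those of `create`):
```{r, eval = FALSE}
## Assumption: setup() takes the same backend arguments as create()
db2 <- data_store$setup(type = "file", name = "acme", data_dir = data_dir)
```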
```{r}
rm(db)
db <- data_store$connect("acme")
db$keys()
```
### Destroying data stores
The builder can also destroy a data store. This removes its configuration
file, and also all of its data.
```{r}
data_store$destroy("acme")
data_store$ls()
```
## Data store backends
Where and how the data is stored depends on the data store backend
used. The package comes with the `file` backend, which stores each *value*
in a file that is named according to its *key*. Other backends are (well,
will be) available in separate R packages.
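To make this concrete, here is a small sketch (assuming only what is stated
above: with the `file` backend each stored *key* becomes a file in the data
directory, so the layout can be inspected directly):
```{r, eval = FALSE}
## Sketch only: each stored key should show up as a file in data_dir
dir.create(data_dir <- tempfile())
db <- data_store$create(type = "file", name = "acme2", data_dir = data_dir)
db$store(key = "alpha", letters)
list.files(data_dir)   # expect a file named after the key, e.g. "alpha"
```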
<file_sep>import React from 'react';
import styled from 'styled-components';
import { Container, Row, Col } from 'react-grid-system';
import Map from '../../_components/map';
const MainCont = styled.div`
padding: 2rem 0 4rem;
`
const Title = styled.h2`
color: #002438;
margin-bottom: 4rem;
`
export default ({ coordinates })=> {
return(
<MainCont>
<Container>
<Row>
<Col xs={12}>
            <Title>Ubicación</Title>
</Col>
<Col xs={12}>
<Map
lat={coordinates[1]}
lng={coordinates[0]}
height={500}
zoom={10}
/>
</Col>
</Row>
</Container>
</MainCont>
)
}<file_sep>import React, { Fragment, useEffect } from "react";
import { gsap } from "gsap";
import { v1 as uuid } from "uuid";
import CUSTOMERS from "../../../_constants/CUSTOMERS_CAROUSEL.json";
import "./index.css";
export default ({ title }) => {
let controller = null;
useEffect(() => {
gsap.set(".box", {
x: (i) => i * 200,
});
const mod = gsap.utils.wrap(0, 200 * CUSTOMERS.length);
const tween = gsap.to(".box", {
paused: true,
duration: 120,
x: "-=" + 200 * CUSTOMERS.length,
ease: "linear",
//paused: true,
repeat: -1,
modifiers: {
x: (x) => mod(parseFloat(x)) + "px",
},
});
controller = tween;
tween.play();
});
return (
<section className='very-main-cont'>
<h2 className='title'>{title}</h2>
<div className='main-cont' style={{ height: "250px" }} id='elm'>
{CUSTOMERS.map((item, i) => (
<div key={uuid()} className='box'>
<img src={item.logo} alt={item.office} className='logo' />
</div>
))}
</div>
</section>
);
};
<file_sep>import React from 'react';
import Layout from '../_layout';
import New from '../_sections/new';
export default ()=> (
<Layout>
<New />
</Layout>
)<file_sep># clasi-template-welink
<file_sep>import { useReducer, useCallback, useContext } from 'react';
import context from '../_context';
import { navigate } from 'gatsby';
import { urlBuilder } from '../_util';
export default (inputs)=> {
const [params, setParams] = useReducer((current, next) => ({ ...current, ...next }), inputs);
const builderId = useContext(context).builderId;
const onChange = e => {
setParams({ [e.target.id]: e.target.value });
}
const onFinish = useCallback(()=> {
const url = urlBuilder(`/properties`, { builderId,...params});
navigate(url);
},[params, builderId]);
return { values: params, onChange, onFinish, setInitial: setParams }
}<file_sep>export { default as Select } from './select';
export { default as Input } from './text';
export { default as Textarea } from './textarea';
export { default as Autocomplete } from './auto-suggestion';<file_sep>import React from 'react';
import Layout from '../_layout';
import Contact from '../_sections/contact';
import Ubication from '../_sections/about/ubication';
export default ({ location })=> {
const { pathname } = location;
return(
<Layout dark={pathname === "/contact" ? true : false}>
<Contact />
</Layout>
)
}<file_sep>/**
* Configure your Gatsby site with this file.
*
* See: https://www.gatsbyjs.org/docs/gatsby-config/
*/
module.exports = {
/* Your site config here */
siteMetadata: {
title: `GPRB Propiedades Industriales`,
},
plugins: [
{
resolve: `gatsby-plugin-styled-components`,
options: {
displayName: false,
},
},
{
resolve: "gatsby-plugin-react-leaflet",
options: {
linkStyles: true, // (default: true) Enable/disable loading stylesheets via CDN
},
},
/*{
resolve: "gatsby-plugin-prefetch-google-fonts",
options: {
fonts: [
{
family: 'Open Sans',
variants: [`300`]
//subsets: [`latin`]
},
],
},
},*/
{
resolve: "gatsby-plugin-react-svg",
options: {
rule: {
include: /_icons/,
},
},
},
{
resolve: `gatsby-plugin-manifest`,
options: {
name: "GPRB",
short_name: "GPRB",
start_url: "/",
background_color: "#6b37bf",
// Enables "Add to Homescreen" prompt and disables browser UI (including back button)
// see https://developers.google.com/web/fundamentals/web-app-manifest/#display
display: "standalone",
icon: "static/favicon.png", // This path is relative to the root of the site.
// An optional attribute which provides support for CORS check.
// If you do not provide a crossOrigin option, it will skip CORS for manifest.
// Any invalid keyword or empty string defaults to `anonymous`
},
},
],
};
<file_sep>import React, { useContext, Fragment } from 'react';
import OfficeContext from '../../_context';
import { ReviewCarousel } from '../../_components/carousels'
import styled from 'styled-components';
import { Container, Col, Row } from 'react-grid-system';
import Contact from '../home/contact';
const MainSection = styled.section`
position: relative;
min-height: 25vh;
display: flex;
justify-content: center;
align-items: center;
padding: 0 2rem;
@media(min-width: 768px){
padding: 4rem 0;
}
`
const TitleQuoteCont = styled.div`
//padding-top: 4rem;
`
const SvgCont = styled.svg`
margin: 2rem 0;
fill: ${props => props.theme.main.primaryColor};
`
const QuoteCarouselCont = styled.div`
padding-top: 0rem;
border-radius: 25px;
width: 80%;
background-color: ${props => props.theme.main.primaryColor};
@media(min-width: 768px){
padding-top: 2rem;
}
`
const CarouselCont = styled.div`
position: relative;
width: 100%;
@media(min-width: 768px){
width: 50%;
}
`
const Divider = styled.div`
border-bottom: 1px solid #DADADA;
`
export default ()=> {
const reviews = useContext(OfficeContext).home.reviews.items;
return(
<Fragment>
<Container>
<MainSection>
<QuoteCarouselCont>
<ReviewCarousel />
</QuoteCarouselCont>
</MainSection>
</Container>
<Contact />
</Fragment>
)
}
<file_sep>import React, { useEffect, useState, Fragment } from 'react';
import Swiper from "react-id-swiper";
import 'swiper/swiper-bundle.min.css';
import styled from 'styled-components';
const GalleryCont = styled.div`
width: 100%;
`
const GalleryItem = styled.div`
height: 450px;
width: 100%;
background-size: cover;
background-image: url(${props => props.url});
`
const GalleryThumbCont = styled.div`
//border: 1px solid red;
margin: 0 !important;
background-color: ${props => props.theme.main.primaryColor};
`
const GalleryThumb = styled.div`
height: 100px;
width: 100px;
border-radius: 6px;
background-size: cover;
background-image: url(${props => props.url});
`
export default ({ images })=> {
const [gallerySwiper, getGallerySwiper] = useState(null);
const [thumbnailSwiper, getThumbnailSwiper] = useState(null);
const gallerySwiperParams = {
getSwiper: getGallerySwiper,
lazy: true,
slidesPerView: "auto",
centeredSlides: true,
slidesPerView: 1,
/*navigation: {
nextEl: ".swiper-button-next.customized-swiper-button-next",
prevEl: ".swiper-button-prev.customized-swiper-button-prev",
},*/
containerClass: "customized-swiper-container",
};
const thumbnailSwiperParams = {
getSwiper: getThumbnailSwiper,
spaceBetween: 10,
slidesPerView: "auto",
centeredSlides: true,
slideToClickedSlide: true,
containerClass: "customized-swiper-container-thumb",
};
useEffect(() => {
if (
gallerySwiper !== null &&
gallerySwiper.controller &&
thumbnailSwiper !== null &&
thumbnailSwiper.controller
) {
gallerySwiper.controller.control = thumbnailSwiper;
thumbnailSwiper.controller.control = gallerySwiper;
}
}, [gallerySwiper, thumbnailSwiper]);
return(
<GalleryCont>
<Swiper {...gallerySwiperParams}>
{images.map((i) => (
          <GalleryItem key={i} url={i} />
))}
</Swiper>
<GalleryThumbCont>
<Swiper {...thumbnailSwiperParams}>
{images.map((i) => (
          <GalleryThumb key={i} url={i} />
))}
</Swiper>
</GalleryThumbCont>
</GalleryCont>
)
}<file_sep>import React, { useContext, Fragment } from 'react';
import styled from 'styled-components';
import { Container, Row, Col } from 'react-grid-system';
import Context from '../../_context';
import { Button } from '../../_components/buttons';
import { ArrowRightOutlined } from '@ant-design/icons';
import { navigate } from 'gatsby';
const SectionCont = styled.div`
margin-top: ${props => props.noMargin ? 0 : "4rem"};
padding: 2rem 0;
//background-color: ${props => props.theme.main.primaryColor};
//color: ${props => props.theme.main.primaryColor};
`
const TextCont = styled.div`
`
const SectionText = styled.p`
text-align: left;
@media(min-width: 768px){
font-size: 1.2rem;
}
`
const ButtonCont = styled.div`
display: flex;
justify-content: center;
align-items: center;
width: 100%;
`
export default ({ noMargin })=> {
const sectionText = useContext(Context).home.contact.title;
return(
<Fragment>
<SectionCont noMargin={noMargin}>
<Container>
<Row align="center" justify="center">
<Col xs={12} md={9}>
<ButtonCont>
<SectionText>
{sectionText}
</SectionText>
</ButtonCont>
</Col>
<Col xs={12} md={3}>
<ButtonCont>
<Button primary block onClick={()=> navigate("/contact")}>
                Contáctanos
<ArrowRightOutlined style={{ marginLeft: 8 }} />
</Button>
</ButtonCont>
</Col>
</Row>
</Container>
</SectionCont>
</Fragment>
)
}<file_sep>export { default as hexToHsl } from './hex-to-hsl';
export { default as hexTorgba } from './hex-to-rgba';
export { default as chunkArray } from './chunk-array';
export { default as truncate } from './truncate-string';
export { default as removeAccent } from './remove-accent';
export { default as urlBuilder } from './url-builder';
export { default as capitalize } from './capitalize';
export { default as FormatCurrency } from './format-currency';<file_sep>import React, {useState} from 'react';
import styled from 'styled-components';
import { Container, Row, Col, Visible, Hidden } from 'react-grid-system';
import { FormProperty } from '../../_components/forms';
import { useWindowSize } from '../../_hooks';
import { PlusCircleOutlined, MinusCircleOutlined } from '@ant-design/icons';
const SectionCont = styled.div`
background-color: ${props => props.theme.main.primaryColor};
padding: 2rem;
margin-top: 4rem;
@media(min-width: 576px){
margin-top: 0;
}
`
const Header = styled.header`
margin: 1rem 0;
display: flex;
justify-content: space-between;
align-items: center;
`
const Title = styled.h1`
color: #fff;
margin: 0;
`
const IconButton = styled.button`
outline: none;
background-color: transparent;
border: none;
color: rgba(255, 255, 255, .5);
transition: 250ms ease;
&:hover{
color: #fff;
}
`
const ButtonContainer = styled.footer`
display: flex;
justify-content: center;
padding: 1rem;
`
export default ()=> {
const size = useWindowSize();
const [filter, setFilter] = useState(false);
return(
<Container>
<SectionCont>
<Header>
<Title>Propiedades</Title>
<Hidden xs>
{
filter
?(
<IconButton onClick={()=> setFilter(false)}>
<MinusCircleOutlined style={{ marginRight: 8 }} />
Menos filtros
</IconButton>
)
:(
<IconButton onClick={()=> setFilter(true)}>
<PlusCircleOutlined style={{ marginRight: 8 }} />
                    Más filtros
</IconButton>
)
}
</Hidden>
</Header>
<Row align="center">
<Col xs={12}>
<FormProperty filter={filter} />
</Col>
<Visible xs>
<Col xs={12}>
<ButtonContainer>
{
filter
?(
<IconButton onClick={()=> setFilter(false)}>
<MinusCircleOutlined style={{ marginRight: 8 }} />
Menos filtros
</IconButton>
)
:(
<IconButton onClick={()=> setFilter(true)}>
<PlusCircleOutlined style={{ marginRight: 8 }} />
                        Más filtros
</IconButton>
)
}
</ButtonContainer>
</Col>
</Visible>
</Row>
</SectionCont>
</Container>
)
}<file_sep>//import { CheckOutlined as Extra from '@ant-design/icons';
import Disconnected from './svg/disconnected.svg';
import Process from './svg/process.svg';
import Connected from './svg/connected.svg';
import Apartment from './svg/apartment.svg';
import Balcony from './svg/balcony.svg';
import Bank from './svg/bank.svg';
import Bath from './svg/bath.svg';
import Bicycle from './svg/bicycle.svg';
import Book from './svg/book.svg';
import Building from './svg/building.svg';
import Bus from './svg/bus.svg';
import Clinic from './svg/clinic.svg';
import Coffee from './svg/coffee.svg';
import Extra from './svg/check.svg';
import Film from './svg/film.svg';
import Floor from './svg/floor.svg';
import Heating from './svg/heating.svg';
import Hospital from './svg/rooms.svg';
import Juridic from './svg/judicial.svg';
import Internet from './svg/laptop.svg';
import Office from './svg/office.svg';
import House from './svg/house.svg';
import Parking from './svg/parking.svg';
import Pet from './svg/pet.svg';
import Phone from './svg/phone.svg';
import Price from './svg/price.svg';
import Restaurant from './svg/restaurant.svg';
import Rooms from './svg/rooms.svg';
import Sale from './svg/sale.svg';
import School from './svg/schools.svg';
import Security from './svg/security.svg';
import Spending from './svg/spending.svg';
import Store from './svg/store.svg';
import Supermarket from './svg/supermarket.svg';
import Surface from './svg/surface.svg';
import Water from './svg/water.svg';
import Industrial from './svg/industrial.svg';
import Depot from './svg/depot.svg';
import Site from './svg/site.svg';
import Agro from './svg/agro.svg';
export default {
Apartment,
Balcony,
Bank,
Bath,
Bicycle,
Book,
Building,
Bus,
Clinic,
Coffee,
Extra,
Film,
Hospital,
Juridic,
Internet,
Office,
Pet,
Phone,
Restaurant,
Sale,
School,
Security,
Store,
Supermarket,
Water,
Floor,
Heating,
House,
Parking,
Price,
Rooms,
Spending,
Surface,
Depot,
Industrial,
Site,
Agro,
Process,
Connected,
Disconnected,
}
<file_sep>import React, { useContext } from 'react';
import styled from 'styled-components';
import Context from '../../../_context';
import { Container, Row, Col } from 'react-grid-system';
import Gallery from './gallery';
import Description from './description';
const MainCont = styled.div`
padding-top: 10rem;
`
export default ({ state })=> {
//const state = useContext(Context).singleProperty;
return(
<MainCont>
<Container>
<Row gutterWidth={0}>
<Col xs={12} md={6}>
<Gallery images={state.images} />
</Col>
<Col xs={12} md={6}>
<Description description={state} />
</Col>
</Row>
</Container>
</MainCont>
)
}<file_sep>import React from "react";
import styled from "styled-components";
import { Row, Col, Visible } from "react-grid-system";
import InteractionButtons from "../interaction-buttons";
import { truncate, FormatCurrency } from "../../../_util";
import { EnvironmentOutlined } from "@ant-design/icons";
const MainCont = styled.div`
background-color: #fff;
min-height: 100%;
@media (min-width: 768px) {
padding: 2rem 4rem;
}
`;
const OperationCode = styled.p`
color: ${(props) => props.theme.main.primaryColor};
font-weight: bold;
`;
const Title = styled.h1`
font-weight: 300;
font-size: 40px;
`;
const Price = styled(Title)`
color: ${(props) => props.theme.main.primaryColor};
`;
const UbicationCont = styled.div`
display: flex;
align-items: center;
justify-content: flex-start;
font-weight: bold;
margin-bottom: 2rem;
@media (min-width: 768px) {
margin: 0;
}
`;
const SvgCont = styled.span`
font-size: 2rem;
margin-right: 1rem;
color: ${(props) => props.theme.main.primaryColor};
`;
export default ({ description }) => {
return (
<MainCont>
<Row>
<Col xs={12}>
<OperationCode>
{`${description.operation}-COD.: ${description.code}`}
</OperationCode>
<Title>{truncate(description.title, 70)}</Title>
<Price>
{`${description.currency} ${FormatCurrency(
description.currency,
description.value
)}`}
</Price>
<UbicationCont>
<SvgCont>
<EnvironmentOutlined />
</SvgCont>
<span>{description.ubication.commune}</span>
</UbicationCont>
</Col>
<Visible xs>
<InteractionButtons />
</Visible>
</Row>
</MainCont>
);
};
<file_sep>import React, { useContext } from "react";
import styled from "styled-components";
import {
Container,
Row,
Col,
Hidden,
setConfiguration,
} from "react-grid-system";
import Context from "../../../_context";
import Logo from "../../../_layout/header/logo";
import { FormatCurrency } from "../../../_util";
const MainCont = styled.div`
margin: 20px;
`;
const Image = styled.img`
width: 100%;
`;
const Title = styled.h1`
font-weight: 300;
font-size: 30px;
`;
const MainImage = styled.div`
width: 100%;
padding: 10px 0px;
`;
const PublicObs = styled.p`
font-weight: 500;
white-space: pre-line;
margin: 0.5rem 0;
`;
const Subtitle = styled.h2`
font-weight: bold;
margin: 2rem 0;
color: #000;
`;
const UserCont = styled.div`
margin-top: 2rem;
display: flex;
align-items: center;
`;
const Avatar = styled.img`
object-fit: cover;
object-position: center;
min-height: 48px;
min-width: 48px;
height: 60px;
width: 60px;
border-radius: 50%;
@media (min-width: 768px) {
min-height: 76px;
min-width: 76px;
height: 76px;
width: 76px;
flex-grow: 0;
flex-shrink: 1;
}
`;
const UserInfoCont = styled.ul`
list-style: none;
padding: 0;
margin: 0 1rem;
`;
const UserInfoItem = styled.li`
color: #000;
`;
const UserInfoItemName = styled(UserInfoItem)`
font-size: 20px;
font-weight: bold;
`;
const Price = styled.p`
font-size: 25px;
font-weight: bold;
color: ${(props) => props.theme.main.primaryColor};
`;
export default ({ state }) => {
const context = useContext(Context);
console.log(context);
return (
<MainCont>
<Container styled={{ display: "none" }}>
<Col md={12}>
<Logo dark />
</Col>
<Row>
<Col xs={12} md={12}>
<Title>{state.title}</Title>
</Col>
<Col xs={12} md={12}>
COD: {state.code}
</Col>
</Row>
<br />
<Row style={{ height: "400px" }}>
<Col xs={12}>
<Row>
<Col xs={8} md={8}>
<MainImage>
<img
style={{
width: "100%",
height: "400px",
objectFit: "contain",
}}
src={state.mainImage}
/>
</MainImage>
</Col>
<Col xs={4} md={4}>
<MainImage>
<img
style={{
width: "100%",
height: "200px",
objectFit: "contain",
}}
src={state.images[1].url}
/>
</MainImage>
<MainImage>
<img
style={{
width: "100%",
height: "200px",
objectFit: "contain",
}}
src={state.images[2].url}
/>
</MainImage>
</Col>
</Row>
</Col>
</Row>
<br />
<br />
<br />
<Row>
<Col xs={12} md={8}>
<PublicObs>{state.publicObservations}</PublicObs>
</Col>
<Col xs={12} md={4}>
<Price>
{`${state.currency} ${FormatCurrency(
state.currency,
state.value
)}`}
</Price>
<h3>
{state.ubication.commune} - EN {state.operation}
</h3>
<br />
<UserCont>
<Avatar
src={state._comercialUser[0].avatar}
alt={state._comercialUser_person[0].lastName}
/>
<UserInfoCont>
<UserInfoItemName>
{state._comercialUser_person[0].firstName}{" "}
{state._comercialUser_person[0].lastName}
</UserInfoItemName>
<br />
<UserInfoItem>
{state._comercialUser_person[0].phone &&
state._comercialUser_person[0].phone.countryCode +
" " +
state._comercialUser_person[0].phone.areaCode +
" " +
state._comercialUser_person[0].phone.phoneNumber}
</UserInfoItem>
<UserInfoItem>{state._comercialUser[0].email}</UserInfoItem>
<UserInfoItem>{state._comercialUser[0].position}</UserInfoItem>
</UserInfoCont>
</UserCont>
</Col>
</Row>
</Container>{" "}
</MainCont>
);
};
<file_sep>import styled from 'styled-components';
export default styled.textarea`
background-color: transparent;
border-radius: 3px;
padding: 5px;
width: 100%;
border: none;
font-size: 1rem;
font-family: "Relaway";
margin-bottom: 1rem;
border: ${props => props.gray ? "1px solid #EBEBEB" : "none" };
@media(min-width: 768px){
margin-bottom: ${props => props.vertical ? "1rem" : "0"};
box-shadow: ${props => props.shadow ? "0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12)" : "none"};
}
&::placeholder{
color: #fff;
}
`<file_sep>import React, { useContext } from 'react';
import styled from 'styled-components';
import { Container, Row, Col } from 'react-grid-system';
import Context from '../../_context';
const MainCont = styled.section`
background-image: linear-gradient( 0deg, rgba(0, 0, 0, .5) 40%, transparent), url(${props => props.theme.about.hero.background});
background-size: cover;
background-repeat: no-repeat;
`
const TitleCont = styled.div`
position: relative;
height: 60vh;
display: flex;
justify-content: flex-start;
align-items: center;
`
const Title = styled.h1`
position: relative;
color: #fff;
padding: 1rem;
text-align: left;
margin: 0;
width: 100%;
font-weight: 300;
@media(min-width: 576px){
text-align: left;
width: 50vw;
}
`
const Image = styled.img`
width: 50vw;
height: 100%;
object-fit: cover;
object-position: center;
`
export default ()=> {
const state = useContext(Context).about;
return(
<MainCont>
<Container>
<TitleCont>
<Title>
Noticias
</Title>
</TitleCont>
</Container>
</MainCont>
)
}<file_sep>import React from "react";
import styled from "styled-components";
import { Row, Col, Hidden } from "react-grid-system";
import InteractionButtons from "../interaction-buttons";
import Icons from "../../../_icons";
const MainCont = styled.div`
padding: 2rem 1rem;
border: 1px solid #ebebeb;
border-right: none;
`;
const PublicObs = styled.p`
margin: 2rem 0;
white-space: pre-line;
`;
const CharsCont = styled.ul`
list-style: none;
padding: 0;
margin: 0;
margin: 2rem 0;
`;
const CharItemLi = styled.li`
display: flex;
align-items: center;
justify-content: flex-start;
margin-bottom: 1rem;
color: #002438;
`;
const CharItem = ({ icon, name }) => {
const Icon = Icons[icon];
return (
<CharItemLi>
<Icon className='clasi-icon' />
<span style={{ marginLeft: 16 }}>{name}</span>
</CharItemLi>
);
};
export default ({ description }) => {
//const charsGeneral = description.characteristics.filter(c => c.type === "GENERAL");
//const charsOthers = description.characteristics.filter(c => c.type !== "GENERAL");
return (
<MainCont>
<Row>
<Hidden xs>
<InteractionButtons />
</Hidden>
<Col xs={12}>
<PublicObs>{description.publicObservations}</PublicObs>
</Col>
<Col xs={12}>
<h2 style={{ color: "#002438" }}>Caracterรญsticas</h2>
<Row>
<Col xs={12} md={6}>
<CharsCont>
{
//charsGeneral.slice(0, 7).map((c) => <CharItem key={c.id} {...c} />)
description.characteristics
.slice(0, description.characteristics.length / 2)
.map((c) => (
<CharItem key={c.id} {...c} />
))
}
</CharsCont>
</Col>
<Col xs={12} md={6}>
<CharsCont>
{
//charsGeneral.slice(7, charsGeneral.length).map((c) => <CharItem key={c.id} {...c} />)
description.characteristics
                  .slice(description.characteristics.length / 2)
.map((c) => (
<CharItem key={c.id} {...c} />
))
}
</CharsCont>
</Col>
</Row>
</Col>
<Col xs={12}>
{/*<h2 style={{ color: "#002438" }}>Otros servicios</h2>*/}
<Row>
<Col xs={12} md={6}>
<CharsCont>
{
//charsOthers.slice(0, 7).map((c) => <CharItem key={c.id} {...c} />)
[].map((c) => (
<CharItem key={c.id} {...c} />
))
}
</CharsCont>
</Col>
<Col xs={12} md={6}>
<CharsCont>
{
//charsOthers.slice(7, charsOthers.length).map((c) => <CharItem key={c.id} {...c} />)
[].map((c) => (
<CharItem key={c.id} {...c} />
))
}
</CharsCont>
</Col>
</Row>
</Col>
</Row>
</MainCont>
);
};
<file_sep>import { useReducer, useEffect, useMemo, useCallback } from "react";
import { useQueryParam } from "gatsby-query-params";
import noData from "../_context/state";
import Data from "../_context/data.class";
export default () => {
const builderId = "60fadfdeb56dcf0008635ca2";
const [query, setQuery] = useReducer(
(current, next) => ({ ...current, ...next }),
{
data: null,
error: false,
loading: true,
}
);
const baseUrl = useMemo(() => {
console.log(
`https://api.clasihome.com/rest/builders?builderId=${builderId}`
);
return `https://api.clasihome.com/rest/builders?builderId=${builderId}`;
}, [builderId]);
const getFeatured = async (id, typeId, maxProperties) => {
try {
console.log(id, typeId);
const data = await fetch(
//`https://api.clasihome.com/rest/properties?id=${id}&typeId=${typeId}&status=PUBLICADA&limit=${maxProperties}&integration=WEB&featured=true`
`https://api.clasihome.com/rest/properties?id=${id}&typeId=${typeId}&status=PUBLICADA&integration=WEB&featured=true`
);
//const data = await fetch(`https://api.clasihome.com/rest/properties?id=${id}&typeId=${typeId}&status=PUBLICADA&limit=${maxProperties}`);
const result = await data.json();
return result;
} catch (e) {
console.log("ERROR PROPIEDADES DESTACADAS ", e);
}
};
const getData = useCallback(async () => {
try {
if (builderId) {
const data = await fetch(baseUrl);
const result = await data.json();
console.log("INITIAL DATA", result);
const featuredProperties = await getFeatured(
result.office,
//"5e8e36b31c9d440000d35090",
"office",
6
);
result.home.properties.items = featuredProperties.properties;
/* let propertiesUrl = `https://api.clasihome.com/rest/properties?id=${
result.user ? result.user : result.office
}&typeId=${result.user ? "user" : "office"}&status=PUBLICADA&limit=6`;
if (!result.home.properties.items) {
propertiesUrl =
"https://api.clasihome.com/rest/properties?id=5e8e36b31c9d440000d35090&typeId=office&status=PUBLICADA&limit=6";
} */
/* const propertiesData = await fetch(propertiesUrl);
const propertiesResult = await propertiesData.json();
result.home.properties.items = propertiesResult.properties; */
console.log("FINAL DATA", result);
setQuery({ loading: false, data: new Data(result) });
} else throw new Error("No builderId");
} catch (e) {
console.log(e);
//setQuery({ loading: false, error: true });
const propertiesData = await fetch(
`https://api.clasihome.com/rest/properties?id=5e8e36b31c9d440000d35090&typeId=office&status=PUBLICADA&limit=8`
);
const propertiesResult = await propertiesData.json();
noData.home.properties.items = propertiesResult.properties;
setQuery({ loading: false, error: false, data: noData });
}
}, [builderId]);
useEffect(() => {
/*if(builderId){
getData();
}
else{
setQuery({ loading: false, error: false, data: noData });
}*/
getData();
}, [builderId]);
return query;
};
<file_sep>import noData from './state';
import { makePropertiesFilters } from '../_util';
import { v1 as uuid } from 'uuid';
export default class{
constructor(props){
this.builderId = props._id;
this.office = {
typeId: props.user ? "user" : "office",
id: props.user ? props.user : props.office,
name: props.email,
email: props.email,
address: props.address,
phone: props.phone,
mobile: props.movil,
lat: props.lat ? props.lat : '0',
lng: props.lng ? props.lng : '0',
};
this.main = {
primaryColor: props.primaryColor,
secondaryColor: "#ffffff",
favicon: "",
logo:{
isImage: props.logo ? true : false,
value: props.logo,
},
logoDark:{
isImage: props.logoDark ? true : false,
value: props.logoDark ? props.logoDark : '/logo-dark.svg',
},
};
this.home = {
hero:{
visible: true,
title: props.home.hero.title || noData.home.hero.title,
background: props.home.hero.background || noData.home.hero.background,
},
properties:{
visible: true,
title: props.home.properties.title || noData.home.properties.title,
footer: props.home.properties.footer || noData.home.properties.footer,
bannerImage: "",
buttonText: props.home.properties.buttonText || noData.home.properties.buttonText,
items: props.home.properties.items || noData.home.properties.items,
},
about:{
banner:{
visible: true,
image: props.home.about.banner.image || noData.home.about.banner.image,
title: props.home.about.banner.title || noData.home.about.banner.title,
subTitle: props.home.about.banner.subTitle || noData.home.about.banner.subTitle,
buttonText: props.home.about.banner.buttonText || noData.home.about.banner.buttonText,
},
},
services:{
visible: true,
items: props.home.services.items || noData.home.services.items,
},
reviews:{
visible: true,
items: props.home.reviews.items || noData.home.reviews.items,
},
contact:{
visible: true,
title: "ยฟNecesitas vender, arrendar o comprar una propiedad? Somos tu socio perfecto.",
buttonText: "contacto"
},
}
this.about = {
...props.about,
history:{
...props.about.history,
background: props.about.history.background ? props.about.history.background : '/about-hero.jpg',
},
hero:{
...props.about.hero,
background: props.about.hero.background || '/about-hero.jpg',
},
description:{
...props.about.description,
background: "/about-description.jpg",
},
stats:{
items:{
years:{
value: props.about.stats.years || 50,
meta: "Aรฑos en el mercado",
},
transactions:{
value: props.about.stats.transactions || 500,
meta: "Ventas y arriendos anuales",
},
properties:{
value: props.about.stats.properties || 1000,
meta: "Propiedades en administraciรณn",
},
proffesionals:{
value: props.about.stats.proffesionals || 70,
meta: "Profesionales",
},
}
}
};
this.paginateProperties = props.paginateProperties;
this.singleProperty = {
mainImage: "/property/main.jpg",
images: [
"/property/main.jpg",
"/property/1.jpg",
"/property/2.jpg",
"/property/3.jpg",
"/property/4.jpg",
"/property/5.jpg",
],
publicObservations: "Esta es la descripciรณn pรบbica hecha pr el usuario donde pone lo que quiera poner. Hermosa Casa en lomas verdes, con 3 pisos. Aqui vivio drew Barirmore durante su corta estadia en Chile.",
operation: "Arriendo",
code: "EB092734",
title: "Casa en Santiago de Chile",
currency: "UF",
value: "75.000",
ubication:{
region: "Rosario",
commune: "Casorra alzamona",
location:{
coordinates: ["0", "0"],
}
},
characteristics:[
{
type: "GENERAL",
id: uuid(),
name: "Casa",
icon: "House"
},
{
type: "GENERAL",
id: uuid(),
name: "Precio",
icon: "Price"
},
{
type: "GENERAL",
id: uuid(),
name: "Construida el 12/06/98",
icon: "Building"
},
{
type: "GENERAL",
id: uuid(),
name: "Sup. Construida 100m2",
icon: "Surface"
},
{
type: "GENERAL",
id: uuid(),
name: "Sup. Total 180m2",
icon: "Surface"
},
{
type: "GENERAL",
id: uuid(),
name: "Baรฑos 2",
icon: "Bath"
},
{
type: "GENERAL",
id: uuid(),
name: "Venta",
icon: "Sale"
},
{
type: "GENERAL",
id: uuid(),
name: "Balcon",
icon: "Balcony"
},
{
type: "GENERAL",
id: uuid(),
name: "Habitaciones 3",
icon: "Rooms"
},
{
type: "GENERAL",
id: uuid(),
name: "Pisos 2",
icon: "Floor"
},
{
type: "GENERAL",
id: uuid(),
name: "Estacionamientos",
icon: "Parking"
},
{
type: "GENERAL",
id: uuid(),
name: "Gasto comรบn 0",
icon: "Spending"
},
{
type: "GENERAL",
id: uuid(),
name: "Calefaciรณn",
icon: "Heating"
},
{
type: "OTHERS",
id: uuid(),
name: "Baรฑo de Servicio",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "<NAME>",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Balcรณn",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Cerca a areas verdes",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "<NAME>",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Seguridad 24 hs",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Cuarto de servicio",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Pรกtio",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Piscina",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Chimenea",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "<NAME>",
icon: "Extra"
},
{
type: "OTHERS",
id: uuid(),
name: "Terraza",
icon: "Extra"
},
]
,
relatedUser:{
id: uuid(),
firstName: "Adrian",
lastName: "Carcamo",
description: "Ingeniero Comercial, Master en Finanzas. Inversor inmobiliario, con 6 aรฑos de experiencia en Banca, Mesa de Dinero. 6 aรฑos en el corretaje de propiedades, especializado en el manejo de cartera de propiedades. ",
avatar: "/team-member-1.jpg",
email:"<EMAIL>",
phone: "+56 9 5555 5555",
jobTitle: "Ejecutivo comercial"
},
};
}
static makeFilters = (filters) => {
let url = '';
let i = 0;
console.log("MAKE FILTES", filters)
for(let key in filters){
i++;
if(filters[key] === "all"){
continue;
}
const more = i !== 1 ? '&' : '';
url = url + more + `${key}=${filters[key]}`;
}
return url;
}
static paginateProperties = (filters)=> new Promise(async(resolve, reject) => {
try{
const url = makePropertiesFilters(filters);
console.log("URLO URL ", url);
const data = await fetch(url);
const result = await data.json();
console.log("URLO URL RESULT", result);
resolve(result);
}catch(e){
console.log("paginateProperties error:", e);
reject(e)
}
});
}<file_sep>import React from "react";
import Layout from "../_layout";
import Hero from "../_sections/home/hero";
import Properties from "../_sections/home/properties";
import About from "../_sections/home/about";
import Customers from "../_sections/home/customers";
import Contact from "../_sections/home/contact";
export default function Home() {
return (
<Layout>
<Hero />
<Properties />
<About />
      <Customers title='NUESTROS CLIENTES CONFÍAN EN NOSOTROS' />
<Contact />
</Layout>
);
}
<file_sep>import React, { useContext } from 'react';
import OfficeContext from '../../_context';
import styled from 'styled-components';
import { Container } from 'react-grid-system';
const HeroCont = styled.div`
/*background-image: linear-gradient(to bottom, rgba(0, 0, 0, .7), rgba(0, 0, 0, .7)), url(${props => props.theme.news.hero.background });
background-size: cover;
background-position: center;
background-repeat: no-repeat;*/
`
const InnerCont = styled.div`
//height: 50vh;
display: flex;
justify-content: flex-start;
align-items: center;
margin: 0;
padding: 2rem;
background-color: ${props => props.theme.main.primaryColor};
@media(min-width: 768px){
}
`
const HeroTitle = styled.h1`
color: #fff;
font-weight: 300;
font-size: 36px;
width: 100%;
text-align: left;
@media(min-width: 768px){
text-align: left;
font-size: 50px;
width: 50%;
}
`
export default ()=>{
const state = useContext(OfficeContext);
return(
<HeroCont>
<Container>
<InnerCont>
<HeroTitle>
{state.news.hero.title}
</HeroTitle>
</InnerCont>
</Container>
</HeroCont>
)
}<file_sep>import React, { useContext, Fragment } from 'react';
import Context from '../../_context';
import { Container, Row, Col } from 'react-grid-system';
import styled from 'styled-components';
import { DescCarousel } from '../../_components/carousels';
const ImageContainer = styled.div`
/*background-color: ${props => props.theme.main.primaryColor};
@media(min-width: 768px){
padding: 3rem 0;
}*/
`
const Image = styled.img`
object-position: center;
object-fit: cover;
width: 100%;
height: 100%;
`
const Carousel = styled.div`
background-color: #F7F7F7;
min-height: 99%;
//padding-bottom: 2rem;
@media(min-width: 768px){
//padding: 4rem;
//padding-top: 20%;
}
`
const CarouselInnerCont = styled.div`
position: relative;
`
export default ()=> {
const state = useContext(Context).about;
return(
<Container>
<Row nogutter>
<Col xs={12} md={12}>
<Carousel>
<CarouselInnerCont>
<DescCarousel />
</CarouselInnerCont>
</Carousel>
</Col>
</Row>
</Container>
)
}<file_sep>import './src/_styles/global.css';<file_sep>import React, { useContext } from "react";
import Context from "../../_context";
import styled from "styled-components";
import { Container, Row, Col } from "react-grid-system";
import { PropertyCarousel } from "../../_components/carousels";
import { Rectangular } from "../../_components/banners";
import { navigate } from "gatsby";
const MainCont = styled.section`
//margin-top: ${(props) => (props.noMargin ? "4rem" : "13rem")};
padding: 4rem 0;
z-index: -999;
@media(min-width:768px){
padding: 4rem 0;
}
`;
const Title = styled.h2`
//color: ${(props) => props.theme.main.main.primaryColor};
margin: 0;
margin-bottom: 4rem;
font-weight: 300;
@media(min-width:768px){
}
`;
export default ({ noMargin }) => {
const state = useContext(Context).home.properties;
return (
<>
<MainCont id='properties' noMargin={noMargin}>
<Container>
<Row>
{state && state.items.lenght != 0 && (
<>
{console.log("state", state.items.length)}
<Col xs={12} style={{ zIndex: "-1" }}>
<Title>{state.title}</Title>
</Col>
<Col xs={12}>
<PropertyCarousel />
</Col>
</>
)}
<Col xs={12}>
<Rectangular
image={state.bannerImage}
buttonText={state.buttonText}
title={state.footer}
icon='/icons/marker.svg'
onClick={() => navigate("/properties")}
/>
</Col>
</Row>
</Container>
</MainCont>
</>
);
};
<file_sep>export { default as FormProperty } from './form-property';
export { default as FormCode } from './form-code';
export { default as FilterForm } from './filter-form';<file_sep>import styled from 'styled-components';
import { hexToHsl } from '../../_util';
export default styled.button`
min-width: 160px;
padding: 0 1rem;
min-height: 44px;
width: ${props => props.block && "100%"};
border: 1px solid #FFFFFF;
border-radius: 6px;
cursor: pointer;
display: flex;
justify-content: center;
align-items: center;
background-color: ${props => props.primary ? props.theme.main.primaryColor : "transparent"};
border-color: ${props => props.primary || props.outlined ? props.theme.main.primaryColor : "#fff"};
color: ${props => props.outlined ? props.theme.main.primaryColor : "#FFFFFF"};
transition: 250ms ease;
box-shadow: ${props => props.shadow && "0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12)"};
&:hover{
background-color: ${props => props.primary ? hexToHsl(props.theme.main.primaryColor, 55) : props.outlined ? props.theme.main.primaryColor : "#fff" };
color: ${props => props.primary ? "#fff" : props.outlined ? "#fff" : props.theme.main.primaryColor};
};
&:active{
background-color: ${props => props.primary ? hexToHsl(props.theme.main.primaryColor, 45) : hexToHsl("#ffffff", 90) };
color: ${props => props.primary ? "#fff" : props.theme.main.primaryColor};
}
`<file_sep>import React, { useContext, Fragment } from "react";
import styled from "styled-components";
import { chunkArray } from "../../_util";
import Context from "../../_context";
import { PropertyCard } from "../cards";
import { Visible, Hidden } from "react-grid-system";
import {
CarouselProvider,
Slider,
Slide,
ButtonBack,
ButtonNext,
Dot,
} from "pure-react-carousel";
import { Row, Col } from "react-grid-system";
import "pure-react-carousel/dist/react-carousel.es.css";
import { ArrowLeftOutlined, ArrowRightOutlined } from "@ant-design/icons";
import { v1 as uuid } from "uuid";
const SvgCont = styled.svg`
fill: ${(props) => props.theme.main.primaryColor};
`;
const StyledDot = styled(Dot)`
height: 10px;
width: 10px;
border-radius: 50%;
border: none;
transition: 250ms ease;
background-color: #e4e4e4;
&:nth-child(2n - 1) {
margin: 0 1rem;
}
&:disabled {
background-color: ${(props) => props.theme.main.primaryColor};
height: 14px;
width: 14px;
}
`;
const StyledButtonBack = styled(ButtonBack)`
color: #d3d3d3;
&:hover {
color: ${(props) => props.theme.main.primaryColor};
}
`;
const StyledButtonNext = styled(ButtonNext)`
color: #d3d3d3;
&:hover {
color: ${(props) => props.theme.main.primaryColor};
}
`;
export default () => {
const color = useContext(Context).main.primaryColor;
const items = useContext(Context).home.properties.items;
const itemsDesk = chunkArray(
items.map((item) => item),
3
);
console.log(itemsDesk.length);
return (
<Fragment>
<Hidden xs>
<CarouselProvider
naturalSlideWidth={100}
naturalSlideHeight={50}
//isIntrinsicHeight={true}
totalSlides={itemsDesk.length}
visibleSlides={1}
orientation='horizontal'
>
<Slider>
{itemsDesk.map((mainItem, index) => (
<Slide key={uuid()} index={index}>
<Row style={{ margin: "0 1rem" }}>
{mainItem.map((item) => (
<Col xs={1} md={4}>
<PropertyCard {...item} />
</Col>
))}
</Row>
</Slide>
))}
</Slider>
<StyledButtonBack className='carousel-back-button'>
<ArrowLeftOutlined />
{/* <SvgCont width="8" height="14" fill="none" version="1.1" viewBox="0 0 8 14" xmlns="http://www.w3.org/2000/svg">
<path d="m0.28783 6.3069 6.0345-6.0196c0.38387-0.38312 1.0062-0.38312 1.3899 0 0.38371 0.38278 0.38371 1.0036 0 1.3863l-5.3396 5.3264 5.3394 5.3262c0.38371 0.383 0.38371 1.0037 0 1.3865-0.38371 0.3829-1.006 0.3829-1.3899 0l-6.0345-6.0197c-0.19186-0.19148-0.28767-0.44217-0.28767-0.69299 0-0.25094 0.096005-0.50181 0.28783-0.6932z"/>
</SvgCont>*/}
</StyledButtonBack>
<StyledButtonNext className='carousel-next-button'>
<ArrowRightOutlined />
{/* <SvgCont width="8" height="14" fill="none" version="1.1" viewBox="0 0 8 14" xmlns="http://www.w3.org/2000/svg">
<path d="m7.7122 7.6931-6.0345 6.0196c-0.38387 0.3831-1.0062 0.3831-1.3899 0-0.38371-0.3828-0.38371-1.0036 0-1.3864l5.3396-5.3264-5.3394-5.3262c-0.38371-0.38293-0.38371-1.0037 0-1.3865 0.38371-0.38293 1.0061-0.38293 1.3899 0l6.0345 6.0197c0.19185 0.19148 0.28767 0.44217 0.28767 0.69299 0 0.25094-0.096 0.50181-0.28783 0.6932z"/>
</SvgCont>*/}
</StyledButtonNext>
</CarouselProvider>
</Hidden>
<Visible xs>
<CarouselProvider
naturalSlideWidth={100}
naturalSlideHeight={130}
//isIntrinsicHeight={true}
totalSlides={items.length}
visibleSlides={1}
orientation='horizontal'
>
<Slider>
{items.map((item, index) => (
<Slide key={item.id} index={index}>
<PropertyCard {...item} />
</Slide>
))}
</Slider>
</CarouselProvider>
</Visible>
</Fragment>
);
};
<file_sep>import React, { Fragment } from 'react';
import styled from 'styled-components';
import { Hidden, Container, Visible } from 'react-grid-system';
import RateBar from './rate-bar';
import NavDesktop from './nav-desktop';
import NavMovil from './nav-movil';
const MainCont = styled.header`
color: #fff;
position: fixed;
top: 0;
left: 0;
width: 100%;
z-index: 1500;
background-color: ${props => props.theme.main.primaryColor};
@media(min-width: 768px){
position: absolute;
background-color: ${props => props.dark ? props => props.theme.main.primaryColor : "transparent"};
}
`
export default ({ dark })=> {
return(
<Fragment>
<Hidden xs>
<MainCont dark={dark}>
<Container>
<NavDesktop dark={dark} />
</Container>
</MainCont>
</Hidden>
<Visible xs>
<MainCont>
<Container>
<NavMovil />
</Container>
</MainCont>
</Visible>
</Fragment>
)
}<file_sep>import React, { useContext } from "react";
import context from "../../_context";
import Link from "../link";
import styled from "styled-components";
import { truncate, FormatCurrency } from "../../_util";
const CardCont = styled.div`
z-index: -1;
background-color: #fff;
display: flex;
flex-direction: column;
align-items: center;
border: 1px solid #ebebeb;
max-height: 500px;
transition: 250ms ease;
box-shadow: 0px 2px 10px rgba(0, 0, 0, 0.108337);
width: 100%;
//margin:0 .3rem;
&:hover {
//box-shadow: 0px 2px 22px rgba(0, 0, 0, 0.108337);
box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.12), 0px 2px 2px rgba(0, 0, 0, 0.12),
0px 4px 4px rgba(0, 0, 0, 0.12), 0px 8px 8px rgba(0, 0, 0, 0.12),
0px 16px 16px rgba(0, 0, 0, 0.12), 0px 32px 32px rgba(0, 0, 0, 0.12);
}
@media (max-width: 768px) {
max-height: 1500px;
}
`;
const CardImage = styled.div`
background-image: url("${(props) => props.src}");
background-position: center;
background-size: cover;
background-repeat: none;
width: 100%;
padding-top: 75%;
`;
const CardInfo = styled.div`
padding: 1rem 1rem 1.5rem 1rem;
width: 100%;
height: 100%;
display: flex;
flex-direction: column;
justify-content: space-between;
color: #212121;
`;
const CardTitleCont = styled.ul`
list-style: none;
padding: 0;
margin: 0;
font-size: 14px;
`;
const CardTitle = styled.li`
font-size: 1.2rem;
margin-bottom: 0.5rem;
font-weight: bold;
`;
const CardPrice = styled.li`
color: ${(props) => props.theme.main.primaryColor};
font-size: 1.2rem;
font-weight: bold;
margin-bottom: 0.5rem;
`;
const CardOperation = styled.span`
//font-weight: bold;
`;
const CardCharacteristics = styled.ul`
list-style: none;
//padding: 1rem;
color: #919191;
font-size: 14px;
`;
const CharItem = styled.li`
margin-bottom: 0.5rem;
`;
const Divider = styled.span`
height: 1px;
width: 100%;
background-color: #ebebeb;
`;
export default ({
mainImage,
title,
value,
currency,
code,
ubication,
characteristics,
_id,
}) => {
const builderId = useContext(context).builderId;
return (
<Link
to={`/property?builderId=${builderId}&propertyId=${_id}`}
title='Ver propiedad'
>
<CardCont>
<CardImage src={mainImage} />
<CardInfo>
<CardTitleCont>
<CardTitle>{truncate(title, 30)}</CardTitle>
<CardPrice>{`${currency} ${FormatCurrency(
currency,
value
)}`}</CardPrice>
<li style={{ margin: "1rem 0" }}>
<CardOperation>Venta - </CardOperation>
<span>cod {code}</span>
</li>
</CardTitleCont>
<CardCharacteristics>
<CharItem>{truncate(ubication.address, 30)}</CharItem>
{characteristics.slice(0, 2).map((char, index) => (
<CharItem key={index}>
<span>
{char.name} {char.value} {char.name === "Sup. Total" && "mt2"}
</span>
</CharItem>
))}
</CardCharacteristics>
</CardInfo>
</CardCont>
</Link>
);
};
<file_sep>export { default as PropertyCarousel } from './property';
export { default as ServiceCarousel } from './service';
export { default as ReviewCarousel } from './review';
export { default as DescCarousel } from './description';<file_sep>import React, { useCallback, useReducer, useEffect, useRef } from "react";
import Layout from "../_layout";
import Hero from "../_sections/property/hero";
import PropertyUser from "../_sections/property/property-user";
import Ubication from "../_sections/property/ubication";
import Properties from "../_sections/home/properties";
import { Row, Col, Container, Visible } from "react-grid-system";
import Contact from "../_sections/property/property-user/user";
import Interaction from "../_sections/property/interaction-buttons";
import styled from "styled-components";
import { useQueryParam } from "gatsby-query-params";
import { urlBuilder } from "../_util";
import { LoadingOutlined, FrownOutlined } from "@ant-design/icons";
import PrintProperty from "../_sections/property/printProperty";
const StandCont = styled.div`
min-height: 50vh;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
font-size: 2rem;
color: ${(props) => props.loading && props.theme.main.primaryColor};
`;
export default ({ location }) => {
const { pathname } = location;
const propertyId = useQueryParam("propertyId");
const [query, setQuery] = useReducer(
(current, next) => ({ ...current, ...next }),
{
loading: true,
error: false,
data: null,
}
);
const getProperty = useCallback(async () => {
try {
const url = urlBuilder("https://api.clasihome.com/rest/properties", {
propertyId,
});
const data = await fetch(url);
const result = await data.json();
console.log("PROPETY URL", url);
console.log("#PROPERTY DATA", result);
setQuery({ loading: false, data: result });
} catch (e) {
console.log(e);
setQuery({ loading: false, error: true });
}
}, [propertyId]);
useEffect(() => {
if (propertyId) {
getProperty();
}
}, [propertyId]);
const { data, loading, error } = query;
if (loading)
return (
<Layout>
<StandCont loading>
<LoadingOutlined />
<p>Cargando...</p>
</StandCont>
</Layout>
);
if (error)
return (
<Layout>
<StandCont>
<FrownOutlined />
          <p>Error de conexión</p>
</StandCont>
</Layout>
);
return (
<Layout dark={pathname === "/property" ? true : false}>
<Hero state={data} />
<PropertyUser state={data} />
<Ubication coordinates={data.ubication.location.coordinates} />
<Properties noMargin />
<Visible xs>
<Container>
<Row>
<Col xs={12}>
<Contact description={data} />
</Col>
</Row>
</Container>
</Visible>
</Layout>
);
};
<file_sep>import React, { useContext, Fragment } from 'react';
import Context from '../../_context';
import styled from 'styled-components';
import { Visible, Hidden } from 'react-grid-system';
import { CarouselProvider, Slider, Slide, ButtonBack, ButtonNext, Dot } from 'pure-react-carousel';
import { LeftCircleFilled, RightCircleFilled } from '@ant-design/icons';
import 'pure-react-carousel/dist/react-carousel.es.css';
const DescriptioneCont = styled.div`
padding: 2rem;
//color: #fff;
`
const DescriptionTitle = styled.h2`
//color: #fff;
`
const DescriptionDescription = styled.p`
//color: #fff;
`
const Description = ({ id, description, title }) => {
return(
<DescriptioneCont>
<DescriptionTitle>
{title}
</DescriptionTitle>
<DescriptionDescription>
{description}
</DescriptionDescription>
</DescriptioneCont>
)
}
export default ()=> {
const state = useContext(Context);
const items = state.about.description.items;
const color = state.main.primaryColor;
return(
<Fragment>
<Hidden xs>
<CarouselProvider
naturalSlideWidth={100}
//naturalSlideHeight={50}
isIntrinsicHeight={true}
totalSlides={items.length}
visibleSlides={2}
orientation="horizontal"
>
<Slider>
{
items.map((item, index) => (
<Slide key={item.id} index={index}>
<Description {...item} />
</Slide>
))
}
</Slider>
<ButtonBack className="carousel-back-button carousel-desc-back-button">
<LeftCircleFilled style={{ color, fontSize: 26 }} />
</ButtonBack>
<ButtonNext className="carousel-next-button carousel-desc-next-button">
<RightCircleFilled style={{ color, fontSize: 26 }} />
</ButtonNext>
{/*
Array(items.length).fill(0).map((_,i) => <Dot style={{ backgroundColor: color }} className="carousel-text-dot" key={i} slide={i} />)
*/}
</CarouselProvider>
</Hidden>
<Visible xs>
<CarouselProvider
naturalSlideWidth={100}
isIntrinsicHeight={true}
totalSlides={items.length}
visibleSlides={1}
orientation="horizontal"
>
<Slider>
{
items.map((item, index) => (
<Slide key={item.id} index={index}>
<Description {...item} />
</Slide>
))
}
</Slider>
<ButtonBack className="carousel-back-button carousel-desc-back-button" style={{ backgroundColor: color }}>
<img src="/icons/chevron-left.svg" alt="chevron" />
</ButtonBack>
<ButtonNext className="carousel-next-button carousel-desc-next-button" style={{ backgroundColor: color }}>
<img src="/icons/chevron-right.svg" alt="chevron"/>
</ButtonNext>
</CarouselProvider>
</Visible>
</Fragment>
)
}<file_sep>import React from 'react';
import { MapContainer, TileLayer, Marker } from 'react-leaflet';
export default ({ lat, lng, height, zoom })=> {
if (typeof window !== 'undefined') {
return(
<MapContainer
center={{ lat, lng }}
zoom={zoom}
scrollWheelZoom={false}
style={{ height }}
>
<TileLayer
          attribution='© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'
url="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png"
/>
<Marker position={[lat, lng]}/>
</MapContainer>
);
}
return <span>a</span>;
}<file_sep>import React from 'react';
import ImageGallery from 'react-image-gallery';
import 'react-image-gallery/styles/css/image-gallery.css';
export default ({ images })=> {
console.log(images);
const gallery = images.map(item => ({ original: item.url, thumbnail: item.url }));
return(
<ImageGallery
items={gallery}
showPlayButton={false}
/>
)
}<file_sep>import React, { useContext, useReducer, useState } from "react";
import Context from "../../_context";
import styled from "styled-components";
import { Container, Row, Col } from "react-grid-system";
import { Input, Textarea } from "../../_components/inputs";
import { Button } from "../../_components/buttons";
import Map from "../../_components/map";
import { CheckCircleFilled, LoadingOutlined } from "@ant-design/icons";
const MainCont = styled.div`
min-height: 80vh;
display: flex;
flex-direction: column;
justify-content: center;
align-items: flex-start;
margin-top: 4rem;
background-color: ${(props) => props.theme.main.primaryColor};
`;
const Title = styled.h1`
background-color: ${(props) => props.theme.main.primaryColor};
color: #fff;
margin: 0;
font-size: 30px;
padding: 2rem;
margin: 0;
@media (min-width: 768px) {
padding: 4rem;
}
`;
const SubTitle = styled.p``;
const Form = styled.form`
padding: 2rem;
//border-radius: 8px;
width: 100%;
margin: 0;
background-color: ${(props) => props.theme.main.primaryColor};
/*box-shadow: 0px 1px 1px rgba(0, 0, 0, .12),
0px 2px 2px rgba(0, 0, 0, .12),
0px 4px 4px rgba(0, 0, 0, .12),
0px 8px 8px rgba(0, 0, 0, .12);*/
@media (min-width: 768px) {
padding: 4rem;
padding-top: 0;
margin: 0;
}
`;
const ImgCaptcha = styled.img`
//width: 100%;
margin-bottom: 1rem;
`;
const MailSpan = styled.span`
color: #fff;
text-decoration: underline;
`;
const SubTitleFooter = styled(SubTitle)`
//color: #fff;
`;
const ButtonContainer = styled.div`
//margin-top: 32px;
display: flex;
//justify-content: flex-start;
align-items: center;
@media (min-width: 768px) {
justify-content: flex-end;
}
`;
const SuccessText = styled.p`
margin: 0;
background: #28a745;
margin-top: 1rem;
font-size: 1rem;
padding: 20px;
color: #fff;
display: flex;
align-items: center;
justify-content: center;
`;
export default () => {
const state = useContext(Context);
const [loading, setLoading] = useState(false);
const [success, setSuccess] = useState(false);
const [values, setValues] = useReducer(
(current, next) => ({ ...current, ...next }),
{
name: "",
email: "",
mobile: "",
message: "",
}
);
const handleChange = (e) => {
setValues({ [e.target.id]: e.target.value });
};
const onSubmit = async (e) => {
e.preventDefault();
setLoading(true);
try {
const options = {
headers: { "Content-type": "application/json" },
method: "POST",
body: JSON.stringify(values),
mode: "cors",
};
const data = await fetch("/sendmail.php", options);
const result = await data.text();
console.log(result);
if (result.includes("success")) {
setValues({
name: "",
mobile: "",
email: "",
message: "",
});
setLoading(false);
setSuccess(true);
setTimeout(() => {
setSuccess(false);
}, 5000);
}
setLoading(false);
} catch (e) {
setLoading(false);
console.log("error", e);
}
};
return (
<MainCont>
<Container>
<Row nogutter>
<Col xs={12} md={12}>
            <Title>¿Dudas? ¿Consultas? Estamos aquí para ayudarlo</Title>
<Form onSubmit={onSubmit}>
<Row>
<Col xs={12} md={6}>
<Row>
<Col xs={12}>
<Input
placeholder='Nombre'
disabled={loading}
id='name'
onChange={handleChange}
value={values.name}
vertical
shadow
/>
</Col>
<Col xs={12}>
<Input
placeholder='Telรฉfono'
disabled={loading}
id='mobile'
onChange={handleChange}
value={values.mobile}
vertical
shadow
/>
</Col>
<Col xs={12}>
<SubTitleFooter style={{ color: "#fff" }}>
                      También puede escribirnos a{" "}
<MailSpan><EMAIL></MailSpan>
</SubTitleFooter>
</Col>
</Row>
</Col>
<Col xs={12} md={6}>
<Row>
<Col xs={12}>
<Input
placeholder='Email'
disabled={loading}
id='email'
onChange={handleChange}
value={values.email}
vertical
shadow
/>
</Col>
<Col xs={12}>
<Textarea
rows='7'
placeholder='Mensaje'
gray
disabled={loading}
id='message'
onChange={handleChange}
value={values.message}
vertical
shadow
/>
</Col>
<Col xs={12}>
<ButtonContainer>
<Button block rounded disabled={loading}>
Enviar
{loading && (
<LoadingOutlined style={{ marginLeft: "1rem" }} />
)}
</Button>
</ButtonContainer>
</Col>
<Col xs={12}>
{success && (
<SuccessText>
                        Su mensaje fue enviado con éxito{" "}
<CheckCircleFilled style={{ marginLeft: ".3rem" }} />
</SuccessText>
)}
</Col>
</Row>
</Col>
</Row>
</Form>
</Col>
<Col xs={12} md={12}>
{state.lat && (
<Map
lat={parseFloat(state.lat)}
lng={parseFloat(state.lng)}
height={300}
zoom={3}
/>
)}
</Col>
</Row>
</Container>
</MainCont>
);
};
<file_sep>import React from "react";
import Layout from "../_layout";
import Hero from "../_sections/about/hero";
import History from "../_sections/about/history";
import Description from "../_sections/about/description";
import Stats from "../_sections/about/stats";
import Team from "../_sections/about/team";
import Ubication from "../_sections/about/ubication";
export default () => (
<Layout>
<Hero />
<History />
<Description />
<Stats />
{/* <Team />
<Ubication /> */}
</Layout>
);
<file_sep>import React, { useContext, Fragment } from 'react';
import Context from '../../_context';
import styled from 'styled-components';
import { Visible, Hidden } from 'react-grid-system';
import { CarouselProvider, Slider, Slide, ButtonBack, ButtonNext, Dot } from 'pure-react-carousel';
import 'pure-react-carousel/dist/react-carousel.es.css';
import { ArrowLeftOutlined, ArrowRightOutlined } from '@ant-design/icons';
const RevieweCont = styled.div`
position: relative;
padding: 2rem;
//background-color: ${props => props.theme.main.primaryColor};
color: #fff;
display: flex;
flex-direction: column;
align-items: center;
justify-content: flex-start;
min-height: 300px;
@media(min-width: 768px){
flex-direction: row;
min-height: 50vh;
align-items: center;
justify-content: space-around;
}
`
const ReviewInnerCont = styled.div`
@media(min-width: 768px){
width: 50%;
}
`
const ReviewAuthor = styled.p`
font-weight: bold;
text-align: center;
margin-top: 4rem;
`
const ReviewDescription = styled.p`
text-align: center;
`
const SvgCont = styled.svg`
margin: 2rem 0;
fill: ${props => props.theme.main.primaryColor};
`
const DotsCont = styled.div`
position: absolute;
width: 100%;
display: flex;
justify-content: center;
`
const QuotationCont = styled.div`
position: absolute;
top: -2rem;
left: 4rem;
`
const StyledDot = styled(Dot)`
height: 2px;
width: 2rem;
//border-radius: 50%;
border: none;
transition: 250ms ease;
background-color: #e4e4e4;
&:nth-child(2n - 1){
margin: 0 1rem;
}
&:disabled{
background-color: ${props => props.theme.main.primaryColor};
}
`
const StyledButtonBack = styled(ButtonBack)`
position: absolute;
top: 50%;
left: 1rem;
background-color: transparent;
border: none;
outline: none;
font-size: 1.5rem;
color: #D3D3D3;
&:hover{
color: ${props => props.theme.main.primaryColor};
}
`
const StyledButtonNext = styled(ButtonNext)`
position: absolute;
top: 50%;
right: 1rem;
background-color: transparent;
border: none;
outline: none;
font-size: 1.5rem;
color: #D3D3D3;
&:hover{
color: ${props => props.theme.main.primaryColor};
}
`
const Review = ({ id, review, author }) => {
return(
<RevieweCont>
<ReviewInnerCont>
<ReviewDescription>
{review}
</ReviewDescription>
<ReviewAuthor>
{author}
</ReviewAuthor>
</ReviewInnerCont>
</RevieweCont>
)
}
export default ()=> {
const state = useContext(Context);
const items = state.home.reviews.items;
const color = state.main.primaryColor;
return(
<Fragment>
<Hidden xs>
<CarouselProvider
naturalSlideWidth={100}
isIntrinsicHeight={true}
totalSlides={items.length}
visibleSlides={1}
orientation="horizontal"
isPlaying={true}
>
<Slider>
{
items.map((item, index) => (
<Slide key={item.id} index={index}>
<Review {...item} />
</Slide>
))
}
</Slider>
<StyledButtonBack>
<ArrowLeftOutlined />
</StyledButtonBack>
<StyledButtonNext>
<ArrowRightOutlined />
</StyledButtonNext>
</CarouselProvider>
</Hidden>
<Visible xs>
<CarouselProvider
naturalSlideWidth={100}
isIntrinsicHeight={true}
totalSlides={items.length}
visibleSlides={1}
orientation="horizontal"
isPlaying={true}
>
<Slider>
{
items.map((item, index) => (
<Slide key={item.id} index={index}>
<Review {...item} />
</Slide>
))
}
</Slider>
{
Array(items.length).fill(0).map((_,i) => <StyledDot /*style={{ backgroundColor: color }} className="carousel-text-dot"*/ key={i} slide={i} />)
}
</CarouselProvider>
</Visible>
</Fragment>
)
}<file_sep>import React from 'react';
import styled from 'styled-components';
import { Row, Col } from 'react-grid-system';
import { Select, Input } from '../inputs';
import { Button, IconButton } from '../buttons';
import { Visible, Hidden } from 'react-grid-system';
const Form = styled.form`
width: 100%;
border-radius: 6px;
padding: 0 15px;
margin-bottom: 1rem;
@media(min-width: 768px){
width: 100%;
padding: 0;
padding-left: 5px;
background-color: #fff;
box-shadow: ${props => props.shadow && "0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12)"};
margin-bottom:0;
}
`
export default ({ block, shadow })=> {
return(
<Form onSubmit={(e) => e.preventDefault()} block={block} shadow={shadow}>
<Row gutterWidth={32} align="center">
<Col xs={12} md={2}>
<Input placeholder="Desde" />
</Col>
<Col xs={12} md={2}>
<Input placeholder="Hasta" />
</Col>
<Col xs={12} md={2}>
<Select
default="Dormitorios"
options={["opcion 1", "opcion 2", "opcion 3"]}
primary
/>
</Col>
<Col xs={12} md={2}>
<Select
default="Baños"
options={["opcion 1", "opcion 2", "opcion 3"]}
primary
/>
</Col>
<Col xs={12} md={2}>
<Select
default="Divisas"
options={["opcion 1", "opcion 2", "opcion 3"]}
primary
/>
</Col>
<Col xs={12} md={2}>
<Hidden xs>
<IconButton primary style={{ height: 60 }}>
Aplicar
</IconButton>
</Hidden>
<Visible xs>
<Button primary block>
Aplicar
</Button>
</Visible>
</Col>
</Row>
</Form>
)
}<file_sep>import React from 'react';
import styled from 'styled-components';
import Link from '../../_components/link';
const CardCont = styled.div`
background-color: #fff;
border: 1px solid #EBEBEB;
`
const CardImage = styled.img`
object-fit: cover;
object-position: center;
width: 100%;
`
const CardTag = styled.span`
width: 86px;
height: 27px;
border-radius: 20px;
background-color: ${props => props.theme.main.primaryColor};
color: #fff;
display: flex;
justify-content: center;
align-items: center;
`
const CardDescription = styled.div`
padding: 1rem;
`
const CardTitle = styled.p`
font-weight: bold;
`
const CardDate = styled.span`
color: #919191;
font-size: 14px;
`
export default ({ image, title, tag, date })=>{
return(
<Link to="/new">
<CardCont>
<CardImage src={image} alt={title} />
<CardDescription>
<CardTag>
{tag}
</CardTag>
<CardTitle>
{title}
</CardTitle>
<CardDate>
{date}
</CardDate>
</CardDescription>
</CardCont>
</Link>
)
}<file_sep>export { default as Rectangular } from './rectangular';<file_sep>import { useEffect, useCallback, useReducer } from 'react';
export default ()=> {
const [query, setQuery] = useReducer((current, next) => ({ ...current, ...next }), {
data: null,
error: false,
loading: true,
});
const getData = useCallback(async(url)=>{
try{
const data = await fetch(url);
const result = await data.json();
return result;
}
catch(e){
console.log(e);
}
});
const getAllData = useCallback(async()=> {
const localData = window.localStorage.getItem("indicators");
const cached = localData ? JSON.parse(localData) : null;
const oneDayMs = 24 * 60 * 60 * 1000;
// Refetch when nothing is cached yet or the cached rates are more than a day old
if( !cached || Date.now() - cached.date > oneDayMs){
try{
const urls = ["https://mindicador.cl/api/uf", "https://mindicador.cl/api/utm", "https://mindicador.cl/api/dolar"];
const data = await Promise.all(urls.map(url => getData(url)));
const indicators = {
date: Date.now(),
uf: data[0].serie[0].valor,
utm: data[1].serie[0].valor,
dollar: data[2].serie[0].valor,
};
console.log("ALL DATA RATE TODAY", indicators);
window.localStorage.setItem("indicators", JSON.stringify(indicators));
setQuery({ loading: false, error: false, data: indicators });
}
catch(e){
console.log(e);
setQuery({ loading: false, error: true, data: null });
}
}
else{
console.log("ALL DATA RATE YESTERDAY", cached.uf);
setQuery({ loading: false, error: false, data: cached });
}
});
useEffect(()=>{
if(window !== "undefined"){
getAllData();
}
},[]);
return query;
}<file_sep>import React from 'react';
import Icons from '../../_icons';
import styled from 'styled-components';
const Svg<file_sep>import React, { useContext } from "react";
import styled from "styled-components";
import Context from "../../_context";
import { FormProperty } from "../../_components/forms";
import { Container } from "react-grid-system";
import Fade from "react-reveal/Fade";
import RateBar from "../../_layout/header/rate-bar";
import {
CarouselProvider,
Slider,
Slide,
ButtonBack,
ButtonNext,
Dot,
} from "pure-react-carousel";
const VeryMainCont = styled.section`
/* background-image: linear-gradient(
to bottom,
hsl(0deg 0% 0% / 70%),
rgb(0 0 0 / 48%)
),
url(${(props) => props.theme.home.hero.background}); */
background-position: center;
background-size: cover;
background-repeat: no-repeat;
color: #fff;
margin-bottom: 4rem;
`;
const MainCont = styled.div`
display: flex;
min-height: 100vh;
flex-direction: column;
justify-content: center;
align-items: flex-start;
position: relative;
@media (min-width: 768px) {
margin: 0;
min-height: calc(100vh - 32px);
}
`;
const TitleCont = styled.div``;
const Title = styled.h1`
font-weight: 300;
max-width: 95%;
font-size: 32px;
text-align: left;
@media (min-width: 768px) {
max-width: 50%;
font-size: 50px;
}
`;
const DownButton = styled.div`
//text-decoration: none;
position: relative;
width: 100%;
bottom: -42px;
@media (min-width: 768px) {
position: absolute;
bottom: -22px;
}
`;
const RateCont = styled.div``;
const SvgCont = styled.svg`
stroke: #fff;
transition: 250ms ease;
${DownButton}:hover & {
stroke: ${(props) => props.theme.main.primaryColor};
}
`;
const SliderContainer = styled.div`
position: absolute;
top: 0;
left: 0;
height: 100%;
width: 100%;
background-size: cover;
background-position: center;
`;
const SlideImage = styled.div`
width: 100%;
height: 100vh;
background-image: linear-gradient(rgba(0, 0, 0, .5), rgba(0, 0, 0, .5)), url("${(
props
) => props.src}");
background-size: cover;
background-position: bottom;
`;
const BackgroundSlider = ({ theme }) => (
<CarouselProvider
naturalSlideWidth={100}
//naturalSlideHeight={125}
isIntrinsicHeight
totalSlides={4}
isPlaying
interval={5000}
>
{console.log(theme)}
<Slider>
<Slide index={0}>
<SlideImage src='/hero5.jpg' alt='trabajo' />
</Slide>
<Slide index={1}>
<SlideImage src='/hero4.jpg' alt='cocina' />
</Slide>
<Slide index={2}>
<SlideImage src='/hero3.jpg' alt='taza de té' />
</Slide>
<Slide index={3}>
<SlideImage src='/hero1.jpg' alt='taza de té' />
</Slide>
</Slider>
{/* <CustonDot slide={0} style={{ right: "69px" }} />
<CustonDot slide={1} style={{ right: "46px" }} />
<CustonDot slide={2} style={{ right: "23px" }} />
<CustonDot slide={3} style={{ right: "0px" }} /> */}
</CarouselProvider>
);
export default () => {
const state = useContext(Context);
return (
<VeryMainCont>
<Fade>
{" "}
<SliderContainer>
<BackgroundSlider {...state} />
</SliderContainer>
</Fade>
<Container>
<MainCont>
<Fade cascade center duration={1000}>
<TitleCont>
<Fade duration={3000}>
<Title>{state.home.hero.title}</Title>
</Fade>
{/* <RateCont>
<RateBar />
</RateCont> */}
</TitleCont>
</Fade>
<DownButton>
<Fade bottom>
<FormProperty shadow />
</Fade>
</DownButton>
</MainCont>
</Container>
</VeryMainCont>
);
};
<file_sep>import React, {useContext} from 'react';
import Context from '../../_context';
import Link from '../../_components/link';
import styled from 'styled-components';
const Nav = styled.span`
text-decoration: none;
&:hover{
color: #fff;
}
&:visited{
color: #fff;
}
`
const LogoImg = styled.img`
max-width: 150px;
`
const LogoText = styled.span`
font-size: 1.5rem;
`
export default ({ dark })=> {
const state = useContext(Context);
const builderId = state.builderId;
return(
<Link to={`/?builderId=${builderId}`}>
<Nav title="Inicio">
{
state.main.logo.isImage
?(
<LogoImg src={ dark ? state.main.logoDark.value : state.main.logo.value} />
)
:(
<LogoText>
{state.main.logo.value}
</LogoText>
)
}
</Nav>
</Link>
)
}<file_sep>import { useCallback, useReducer, useLayoutEffect } from 'react';
import { useUrlBuilder } from '../_hooks';
export default ({ location })=> {
const url = useUrlBuilder('https://api.clasihome.com/rest/properties', location);
const [query, setQuery] = useReducer((current, next) => ({...current, ...next}),{
loading: true,
error: false,
data: null,
})
const getProperties = useCallback(async() => {
try{
setQuery({ loading:true })
//const url = location.search ? urlBuilder('https://api.clasihome.com/rest/properties',{...params, id, typeId} ) : urlBuilder('https://api.clasihome.com/rest/properties',{id, typeId} );
//const url = location.search ? 'https://api.clasihome.com/rest/properties' + location.search + "&typeId=" + typeId + "&id=" +id : 'https://api.clasihome.com/rest/properties' + "?typeId=" + typeId + "&id=" +id
const data = await fetch(url);
const result = await data.json();
setQuery({ loading: false, data: result });
}
catch(e){
console.log(e);
setQuery({ loading: false, error: true });
}
},[location]);
useLayoutEffect(()=>{
getProperties();
},[location]);
return query;
}
/**
import { useEffect, useContext, useState, useCallback, useReducer } from 'react';
import context from '../_context';
import { getSearchParams } from 'gatsby-query-params';
import { urlBuilder } from '../_util';
export default ()=> {
const office = useContext(context).office;
const params = getSearchParams();
const [query, setQuery] = useReducer((current, next) => ({...current, ...next}),{
loading: true,
error: false,
data: null,
})
const getProperties = useCallback(async() => {
try{
const { id, typeId } = office;
const url = urlBuilder('https://api.clasihome.com/rest/properties',{...params, id, typeId} );
console.log("PROPERTIES URL", url)
const data = await fetch(url);
const result = await data.json();
setQuery({ loading: false, data: result });
}
catch(e){
console.log(e);
setQuery({ loading: false, error: true });
}
},[office, params])
useEffect(()=>{
getProperties();
},[office, params]);
return query;
}
*/<file_sep>import React, { useEffect, useContext } from 'react';
import styled from 'styled-components';
import OfficeContext from '../../_context';
import { gsap } from 'gsap';
const BarsCont = styled.ul`
margin: 0;
padding: 0;
list-style: none;
`
const Bar = styled.li`
background-color: ${props => props.theme.main.primaryColor};
height: 3px;
width: 19px;
margin-bottom: .25rem;
border-radius: 19px;
`
const Button = styled.button`
position: relative;
z-index: 1500;
background: transparent;
border: none;
outline: none !important;
border-radius: 2px ;
transition: 250ms ease;
cursor: pointer;
@media(min-width: 768px){
display: none;
}
`
export default ({ onClick, visible })=> {
const office = useContext(OfficeContext);
useEffect(()=> {
if(visible){
gsap.to('#responsive-bar-top', .25, { y: 7, rotate: 45, backgroundColor: "#fff" });
gsap.to('#responsive-bar-middle', .25, { backgroundColor: "#fff", opacity: 0 });
gsap.to('#responsive-bar-bottom', .25, { y: -7, rotate: -45, backgroundColor: "#fff" });
} else{
gsap.to('#responsive-bar-top', .25, { y: 0, rotate: 0, backgroundColor: "#fff" });
gsap.to('#responsive-bar-middle', .25, { opacity: 1, backgroundColor: "#fff" });
gsap.to('#responsive-bar-bottom', .25, { y: 0, rotate: 0, backgroundColor: "#fff" });
}
},[visible])
return(
<Button onClick={onClick}>
<BarsCont>
<Bar id="responsive-bar-top" />
<Bar id="responsive-bar-middle" />
<Bar id="responsive-bar-bottom" />
</BarsCont>
</Button>
)
};<file_sep>import React, { useContext } from 'react';
import Context from '../../_context';
import styled from 'styled-components';
import { Container, Row, Col } from 'react-grid-system';
const MainCont = styled.section`
padding: 6rem 0;
//min-height: 100vh;
@media(min-width: 576px){
padding: 6rem 0;
}
`
const HistoryCont = styled.div`
display: flex;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
//height: 70vh;
margin: 1rem 0;
@media(min-width: 576px){
padding: 6rem 0;
justify-content: center;
}
`
const Title = styled.h2`
//color: ${props => props.theme.main.primaryColor};
`
const Description = styled.div`
`
const ImageContainer = styled.div`
position: relative;
height: 250px;
margin-top: 4rem;
background-color: ${props => props.theme.main.primaryColor};
@media(min-width: 768px){
height: 100%;
margin-top: 0;
}
`
const Image = styled.img`
width: 100%;
//position: relative;
//bottom: -5px;
top: 0;
@media(min-width: 768px){
height: 100%;
margin-top: 0;
}
`
export default ()=> {
const state = useContext(Context).about;
return(
<MainCont>
<Container>
<Row>
<Col xs={12} md={5}>
<HistoryCont>
<Title>
{state.history.title}
</Title>
<Description dangerouslySetInnerHTML={{__html: state.history.description}} />
</HistoryCont>
</Col>
<Col xs={12} md={7}>
<Image src={state.description.background} alt="historia" />
</Col>
</Row>
</Container>
</MainCont>
)
}<file_sep>import React, { useContext, Fragment, useState } from "react";
import styled from "styled-components";
import Context from "../../_context";
import { FormProperty, FilterForm } from "../../_components/forms";
import { Container } from "react-grid-system";
import RateBar from "../../_layout/header/rate-bar";
import { DownOutlined, UpOutlined } from "@ant-design/icons";
import Zoom from "react-reveal/Zoom";
const VeryMainCont = styled.section`
background-image: linear-gradient(
to bottom,
rgba(0, 0, 0, 0.5),
rgba(0, 0, 0, 0.5)
),
url(${(props) => props.theme.home.hero.background});
background-position: center;
background-size: cover;
background-repeat: no-repeat;
//color: #fff;
`;
const MainCont = styled.div`
display: flex;
min-height: 100vh;
flex-direction: column;
justify-content: center;
align-items: flex-start;
position: relative;
@media (min-width: 768px) {
min-height: calc(100vh - 81px);
}
`;
const Title = styled.h1`
font-weight: 300;
max-width: 95%;
font-size: 32px;
text-align: left;
color: #fff;
@media (min-width: 768px) {
max-width: 50%;
font-size: 50px;
}
`;
const DownButton = styled.div`
//text-decoration: none;
position: absolute;
bottom: 30px;
`;
const SvgCont = styled.svg`
stroke: #fff;
transition: 250ms ease;
${DownButton}:hover & {
stroke: ${(props) => props.theme.main.primaryColor};
}
`;
const MoreButton = styled.button`
margin: 2rem 0;
border: none;
background: transparent;
color: ${(props) => props.theme.main.primaryColor};
transition: 250ms ease;
display: flex;
justify-content: center;
align-items: center;
&:hover {
filter: saturate(5.5);
}
`;
export default () => {
const state = useContext(Context);
const [filter, setFilter] = useState(false);
return (
<Fragment>
<VeryMainCont>
<Container>
<MainCont>
<Title>
<Zoom cascade>Busca tu propiedad</Zoom>
</Title>
<FormProperty shadow filter={filter} />
<DownButton href='#properties'>
{/* <SvgCont width="30" height="30" viewBox="0 0 30 30" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="15" cy="15" r="14.5"/>
<path d="M19.2426 14L15 18.2427L10.7574 14" strokeLinecap="round" strokeLinejoin="round"/>
</SvgCont>*/}
</DownButton>
</MainCont>
</Container>
</VeryMainCont>
<Container>
<MoreButton onClick={() => setFilter(!filter)}>
{filter ? (
<Fragment>
Menos filtros
<UpOutlined style={{ marginLeft: 8 }} />
</Fragment>
) : (
<Fragment>
Más filtros
<DownOutlined style={{ marginLeft: 8 }} />
</Fragment>
)}
</MoreButton>
</Container>
</Fragment>
);
};
<file_sep>import React from "react";
import Context from "../_context";
import data from "../_context/state";
import styled, { ThemeProvider } from "styled-components";
import Header from "./header";
import Footer from "./footer";
import "animate.css";
import { useLayout } from "../_hooks";
import LoaderScreen from "../_components/LoaderScreen";
import { LoadingOutlined } from "@ant-design/icons";
const Layout = styled.div`
overflow: hidden;
`;
const LoadingCont = styled.div`
min-height: 100vh;
background-color: hsl(0, 0%, 23%);
display: flex;
justify-content: center;
align-items: center;
font-size: 2rem;
color: #ffffff;
`;
const Body = styled.div`
position: relative;
//padding-top: 67px;
min-height: 100vh;
@media (min-width: 768px) {
//padding-top: 89px;
}
`;
export default ({ children, dark }) => {
const { loading, data, error } = useLayout();
if (loading)
return (
<LoadingCont>
<LoadingOutlined spin />
</LoadingCont>
);
if (error) return <LoadingCont>Error de conexion</LoadingCont>;
return (
<Context.Provider value={data}>
<ThemeProvider theme={data}>
<Layout>
<Header dark={dark} />
<Body>{children}</Body>
<Footer />
</Layout>
</ThemeProvider>
</Context.Provider>
);
};
<file_sep>import React, { useCallback, useEffect } from 'react';
import styled from 'styled-components';
import { Container } from 'react-grid-system';
import { useGetIndicators } from '../../_hooks';
import { LoadingOutlined } from '@ant-design/icons';
const MainCont = styled.div`
//background-color: ${props => props.theme.main.primaryColor};
color: #fff;
padding: 2rem 0;
font-size: 12px;
user-select: none;
`
const RatesCont = styled.ul`
display: flex;
justify-content: center;
align-items: center;
font-weight: bold;
color: #fff;
@media(min-width: 768px){
font-weight: normal;
justify-content: flex-start;
color: #fff;
}
`
const RateItem = styled.li`
margin-left: .30rem;
&::after{
content: " -"
}
@media(min-width: 768px){
margin-left: .5rem;
&::after{
content: " /"
}
}
`
const RateItemNoAfter = styled(RateItem)`
&::after{
content: ""
}
@media(min-width: 768px){
&::after{
content: ""
}
}
`
export default ()=> {
const { loading, error, data } = useGetIndicators();
if(loading) return(
<MainCont>
<Container>
<RatesCont>
<RateItem>
UF <span><LoadingOutlined /></span>
</RateItem>
<RateItem>
UTM <span><LoadingOutlined /></span>
</RateItem>
<RateItemNoAfter>
Dólar <span><LoadingOutlined /></span>
</RateItemNoAfter>
</RatesCont>
</Container>
</MainCont>
);
if(error) return <span>error de conexión</span>
return(
<MainCont>
<Container>
<RatesCont>
<RateItem>
UF {data.uf}
</RateItem>
<RateItem>
UTM {data.utm}
</RateItem>
<RateItemNoAfter>
Dólar {data.dollar}
</RateItemNoAfter>
</RatesCont>
</Container>
</MainCont>
)
}<file_sep>import React, { useReducer, useState, useEffect, useContext } from 'react';
import context from '../../_context';
import styled from 'styled-components';
import { LoadingOutlined, SearchOutlined } from '@ant-design/icons';
import { removeAccent, truncate } from '../../_util';
import { navigate } from 'gatsby';
const InputLabel = styled.label`
background-color: #fff;
position: relative;
display: flex;
align-items: center;
height: 44px;
width: 100%;
margin-bottom: 1rem;
border: ${props => props.gray ? "1px solid #000000" : "none" };
padding-right: 16px;
color: ${props => props.primary ? props.theme.main.primaryColor : "#212121"};
border-radius: 6px;
box-shadow: 0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12);
@media(min-width: 768px){
margin-bottom: ${props => props.vertical ? "1rem" : "0"};
box-shadow: ${props => props.shadow ? "0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12)" : "none"};
}
`
const Input = styled.input`
background-color: transparent;
//box-shadow: 0px 0px 1px rgba(0, 0, 0, .12), 0px 0px 2px rgba(0, 0, 0, .12), 0px 4px 4px rgba(0, 0, 0, .12), 0px 8px 8px rgba(0, 0, 0, .12);
border-radius: 3px;
padding: 5px;
height: 44px;
width: 100%;
border: none;
font-size: 1rem;
color: ${props => props.primary ? props.theme.main.primaryColor : "#878787"};
&::placeholder{
color: ${props => props.gray ? "#8695A1" : "#5a5a5a"};
}
`
const OptionsMainCont = styled.ul`
background-color: #fff;
color: initial;
width: 100%;
position: absolute;
left: 0;
top: 44px;
padding: 1rem 5px;
border: 1px solid #cecece;
z-index: 100;
`
const Option = styled.button`
background-color: transparent;
border: none;
outline: none;
cursor: pointer;
transition: 250ms ease;
display: flex;
text-align: left;
&:hover{
color: ${props => props.theme.main.primaryColor} !important;
}
`
const PropertyImg = styled.img`
width: 60px;
height: 60px;
border-radius: 2px;
object-fit: cover;
object-position: center;
`
const PropertyInfoCont = styled.div`
display: flex;
flex-direction: column;
padding-left: .5rem;
`
const PropertyInfoCode = styled.span`
font-size: 12px;
color: ${props => props.theme.main.primaryColor};
`
const PropertyInfoTitle = styled.span`
font-size: 14px;
`
const PropertyInfoDescription = styled.span`
font-size: 12px;
color: #666;
`
export default ({ selected, onSelect, id, placeholder, options, gray, shadow, primary }) => {
const contextData = useContext(context);
const [value, setValue] = useState(selected)
const [state, setState] = useReducer((current, next) => ({ ...current, ...next }),{
loading: false,
data: [],
});
useEffect(()=>{
if(selected){
console.log("OPTIONS",)
setValue(selected);
}
},[selected])
const onSearch = async(e) => {
const value = e.target.value;
if(options){
setValue(value);
onSelect(e);
const valueLen = value.length;
const compare = removeAccent(value).toUpperCase();
const newData = valueLen === 0 ? [] : options.filter(item => removeAccent(item).toUpperCase().indexOf(compare) !== -1);
setState({ data: newData });
}
else{
setState({ loading: true });
try{
setValue(value);
const propertiesUrl = `https://api.clasihome.com/rest/properties?id=${contextData.office.id}&typeId=${contextData.office.typeId}&status=PUBLICADA&stringSearch=${value}`;
const data = await fetch(propertiesUrl);
const result = await data.json();
console.log(result);
setState({ data: value.length ? result.properties : [], loading: false });
}
catch(e){
console.log(e);
setState({ loading: false });
}
}
}
const onClick = e => {
setState({ data: [] });
if(options){
setValue(e.target.value);
onSelect(e);
}
else{
console.log("PROPETY ID", e.currentTarget.id);
navigate(`/property?builderId=${contextData.builderId}&propertyId=${e.currentTarget.id}`);
}
}
return (
<InputLabel htmlFor={id} gray={gray} shadow={shadow} primary={primary}>
<Input
id={id}
name={id}
value={value}
onChange={onSearch}
type="text"
placeholder={placeholder}
gray
autoComplete="off"
primary={primary}
/>
{
state.loading
?<LoadingOutlined />
:<SearchOutlined />
}
{
options
?(
state.data.length !== 0 &&
(<OptionsMainCont>
{
state.data.map(item => (
<li key={item}>
<Option
type="button"
id={id}
value={item}
onClick={onClick}
>
{item}
</Option>
</li>
))
}
</OptionsMainCont>)
)
:(
state.data.length !== 0 &&
(<OptionsMainCont>
{
state.data.map(item => (
<li key={item._id}>
<Option id={item._id} type="button" onClick={onClick}>
{console.log(item._id)}
<PropertyImg src={item.mainImage} alt={item.code} />
<PropertyInfoCont>
<PropertyInfoCode>
{ item.operation + " - CODE: " + item.code}
</PropertyInfoCode>
<PropertyInfoTitle>
{item.title}
</PropertyInfoTitle>
<PropertyInfoDescription>
{truncate(item.publicObservations, 220)}
</PropertyInfoDescription>
</PropertyInfoCont>
</Option>
</li>
))
}
</OptionsMainCont>)
)
}
</InputLabel>
)
}
|
4d4b3c6e6d468c57ad5f04a8eba25291b5c95347
|
[
"JavaScript",
"Markdown"
] | 54 |
JavaScript
|
clasi2020home/WEB-gprb
|
311187f4b5355f41768d49e46eb55d3aabb1d85a
|
54f1bb6e3b194d380ff488d38d57f0b85a44015b
|
refs/heads/master
|
<file_sep>package mygame.enemy;
import javafx.scene.SnapshotParameters;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.ImageView;
import javafx.scene.paint.Color;
import javafx.scene.transform.Rotate;
import mygame.Bullet;
import mygame.Config;
import mygame.Entity;
import mygame.GameField;
import mygame.tile.Road;
import javafx.scene.image.Image;
import mygame.tile.Spawner;
import mygame.tile.Target;
public class Enemy extends Entity {
    private static final int[] dx = {0, 0, -1, 1};
    private static final int[] dy = {-1, 1, 0, 0};
private int velocityX;
private int velocityY;
public int getVelocityX() {
return velocityX;
}
public void setVelocityX(int velocityX) {
this.velocityX = velocityX;
}
public int getVelocityY() {
return velocityY;
}
public void setVelocityY(int velocityY) {
this.velocityY = velocityY;
}
private int maximumHitPoint;
public void setMaximumHitPoint(int maximumHitPoint) {
this.maximumHitPoint = maximumHitPoint;
}
private int hitPoint;
private int speed = 1;
private int armor;
private int reward;
public Enemy() {
}
public Enemy(int x, int y) {
super(x, y);
}
public Enemy(int x, int y, GameField field) {
super(x, y, field);
}
public int getHitPoint() {
return hitPoint;
}
public void setHitPoint(int hitPoint) {
this.hitPoint = hitPoint;
}
public int getSpeed() {
return speed;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public int getArmor() {
return armor;
}
public void setArmor(int armor) {
this.armor = armor;
}
public int getReward() {
return reward;
}
public void setReward(int reward) {
this.reward = reward;
}
public boolean checkCollisionWithBullet(Bullet b, long currentNanoSecond){
double x = b.calculateCurrentPositionX(currentNanoSecond);
double y = b.calculateCurrentPositionY(currentNanoSecond);
x += Config.TILE_SIZE / 2.0;
y += Config.TILE_SIZE / 2.0;
int topLeftX = super.getX() / Config.TILE_SIZE * Config.TILE_SIZE;
int topLeftY = super.getY() / Config.TILE_SIZE * Config.TILE_SIZE;
if (x >= topLeftX && x < topLeftX+Config.TILE_SIZE && y>=topLeftY&&y<topLeftY+Config.TILE_SIZE){
return true;
}
return false;
}
public boolean checkHitByBulletAndRemove(long currentNanoSecond){
for(Entity e : super.getField().getEntities()){
if (e instanceof Bullet && e.isAlive()){
if (checkCollisionWithBullet((Bullet)e, currentNanoSecond)){
hitPoint -= ((Bullet) e).getDamage();
if (hitPoint <= 0){
super.setAlive(false);
}
e.setAlive(false);
return true;
}
}
}
return false;
}
public void move() {
if (super.getX()==getField().getTargetX() && super.getY()==getField().getTargetY()){
super.setAlive(false);
}
int topLeftX = super.getX() / Config.TILE_SIZE * Config.TILE_SIZE;
int topLeftY = super.getY() / Config.TILE_SIZE * Config.TILE_SIZE;
// TODO: add speed
if (super.getX() % Config.TILE_SIZE == 0 && super.getY() % Config.TILE_SIZE == 0) {
boolean broken = false;
for (int i = 0; i < 4 && !broken; ++i) {
if (speed*dx[i] == -getVelocityX() && speed*dy[i] == -getVelocityY())
continue;
for (Entity e : super.getField().getEntities())
if (e instanceof Road) {
if (topLeftX + Config.TILE_SIZE * dx[i] == e.getX()
&& topLeftY + Config.TILE_SIZE * dy[i] == e.getY()
// find next tile to start the route
) {
setDirection(dx[i], dy[i]);
setVelocityX(dx[i]*speed);
setVelocityY(dy[i]*speed);
super.setX(super.getX() + dx[i]*speed);
super.setY(super.getY() + dy[i]*speed);
broken = true;
break;
}
}
}
}
else {
// if not in the intersection of grid, continue going
int u = super.getX() + getVelocityX();
int v = super.getY() + getVelocityY();
int downRightX = topLeftX + Config.TILE_SIZE - 1;
int downRightY = topLeftY + Config.TILE_SIZE - 1;
u = Math.min(u, downRightX + 1);
v = Math.min(v, downRightY + 1);
u = Math.max(u, topLeftX);
v = Math.max(v, topLeftY);
super.setX(u);
super.setY(v);
}
}
private void drawHPBar(GraphicsContext gc){
double barX = super.getX()+0.2*Config.TILE_SIZE;
double barY = super.getY();
double barW = Config.TILE_SIZE - 0.4*Config.TILE_SIZE;
double barH = 4;
gc.setFill(Color.GREY);
gc.fillRect(barX, barY, barW, barH);
gc.setFill(Color.ORANGERED);
double hitPointPercentage = hitPoint*1.0 / maximumHitPoint;
gc.fillRect(barX, barY, hitPointPercentage*barW, barH);
}
public void draw(GraphicsContext gc){
super.draw(gc);
// draw HP bar
drawHPBar(gc);
}
}
<file_sep>package mygame;
import mygame.tile.RandomEntity;
import mygame.tile.Spawner;
import mygame.tile.Target;
import java.util.List;
// GameField manages all of the Entities of a Stage
public class GameField {
private int spawnerX, spawnerY, targetX, targetY;
private List<Entity> entities;
public int getSpawnerX() {
return spawnerX;
}
public void setSpawnerX(int spawnerX) {
this.spawnerX = spawnerX;
}
public int getSpawnerY() {
return spawnerY;
}
public void setSpawnerY(int spawnerY) {
this.spawnerY = spawnerY;
}
public int getTargetX() {
return targetX;
}
public void setTargetX(int targetX) {
this.targetX = targetX;
}
public int getTargetY() {
return targetY;
}
public void setTargetY(int targetY) {
this.targetY = targetY;
}
public List<Entity> getEntities() {
return entities;
}
public void addEntity(Entity e){
this.entities.add(e);
}
public GameField(GameStage stage) {
this.entities = stage.getEntities();
for(Entity E : this.entities){
E.setField(this);
if (E instanceof Spawner){
System.out.println("FOUND SPAWNER!");
this.setSpawnerX(E.getX());
this.setSpawnerY(E.getY());
}
if (E instanceof Target){
System.out.println("FOUND TARGET!");
this.setTargetX(E.getX());
this.setTargetY(E.getY());
}
}
for (int i = 0; i < Config.RANDOM_ENTITIES_NUM; ++i){
RandomEntity randomEntity = new RandomEntity(this);
entities.add(randomEntity);
}
}
}
<file_sep>package mygame.enemy;
import javafx.scene.SnapshotParameters;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.Image;
import javafx.scene.image.ImageView;
import javafx.scene.paint.Color;
import javafx.scene.transform.Rotate;
import mygame.Config;
import mygame.GameField;
public class TankerEnemy extends Enemy {
private Image straightGunImage, gunImage;
public TankerEnemy(int x, int y, GameField field) {
super(x, y, field);
super.setMaximumHitPoint(20);
super.setHitPoint(20);
setImage(Config.ENEMY_TANKER_BASE);
this.gunImage = this.straightGunImage = Config.ENEMY_TANKER_GUN;
setSpeed(1);
setReward(20);
}
public void setDirection(int dx, int dy){
super.setDirection(dx, dy);
ImageView iv = new ImageView(straightGunImage);
Rotate rotation = new Rotate();
rotation.setPivotX(0);
rotation.setPivotY(0);
if (dx == 0 && dy == 1) {
rotation.setAngle(90);
}
else if (dx == 0 && dy == -1){
rotation.setAngle(-90);
}
else if (dx == 1 && dy == 0){
rotation.setAngle(0);
}
else if (dx == -1 && dy == 0){
rotation.setAngle(-180);
}
iv.getTransforms().add(rotation);
SnapshotParameters params = new SnapshotParameters();
params.setFill(Color.TRANSPARENT);
this.gunImage = iv.snapshot(params, null);
}
public void draw(GraphicsContext gc){
super.draw(gc);
gc.drawImage(gunImage, getX(), getY());
}
}
<file_sep>package mygame;
import mygame.tile.*;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
// A GameStage defines the initial state of a GameField
public class GameStage {
private List<Entity> entities = new ArrayList<Entity>();
// fixed height = 7;
// fixed width = 10;
public List<Entity> getEntities() {
return entities;
}
public GameStage(List<Entity> entities) {
this.entities = entities;
}
public static GameStage load(String stageDir) throws FileNotFoundException {
List<Entity> entities;
entities = new ArrayList<Entity>();
Scanner scanner = new Scanner(new File(stageDir));
for (int i = 0; i < Config.TILE_VERTICAL; ++i) {
for (int j = 0; j < Config.TILE_HORIZONTAL; ++j) {
int type = scanner.nextInt();
if (type == 0) {
entities.add(new Road(j * Config.TILE_SIZE, i * Config.TILE_SIZE));
}
else if (type == 1){
entities.add(new Mountain(j * Config.TILE_SIZE, i * Config.TILE_SIZE));
}
else if (type == 2){
entities.add(new Spawner(j * Config.TILE_SIZE, i * Config.TILE_SIZE));
}
else if (type == 3){
entities.add(new Target(j * Config.TILE_SIZE, i * Config.TILE_SIZE));
}
}
}
return new GameStage(entities);
}
}
<file_sep>package mygame;
import javafx.scene.image.Image;
import javafx.util.Pair;
import mygame.enemy.Enemy;
import mygame.tile.tower.Tower;
public class Bullet extends Entity {
private int damage;
private int speed;
private double targetX, targetY;
private double sourceX, sourceY;
private double directionX, directionY;
private long firedTime;
private final double BULLET_SPEED_UNIT = 100;
private double degree;
public int getSpeed() {
return speed;
}
public void setSpeed(int speed) {
this.speed = speed;
}
public long getFiredTime() {
return firedTime;
}
public void setFiredTime(long firedTime) {
this.firedTime = firedTime;
}
public Bullet () {}
public double getDirectionX() {
return directionX;
}
public double getDirectionY() {
return directionY;
}
public Bullet (int x, int y, Tower source, Enemy target, long firedTime) {
super(x, y);
this.damage = source.getDamage();
this.speed = 5;
this.firedTime = firedTime;
this.targetX = target.getX();// + Config.TILE_SIZE / 2.0;
this.targetY = target.getY();// + Config.TILE_SIZE / 2.0; // aim the center of enemy
this.sourceX = source.getX();
this.sourceY = source.getY();
this.directionX = this.targetX - this.sourceX;
this.directionY = this.targetY - this.sourceY;
/*
System.out.print("target X: ");
System.out.println(this.targetX);
System.out.print("target Y: ");
System.out.println(this.targetY);
System.out.print("source X: ");
System.out.println(this.sourceX);
System.out.print("source Y: ");
System.out.println(this.sourceY);
*/
if (directionX == 0) {
if (directionY < 0) degree = -90;
else degree = 90;
}
else {
double k = directionY / directionX;
if (k >= 0) {
degree = Math.atan(k) * 180 / Math.PI;
if (directionX < 0) degree -= 180;
} else if (k < 0) {
degree = 180 - Math.atan(-k) * 180 / Math.PI;
if (directionX > 0) degree -= 180;
}
}
double normalisingConstant = Math.sqrt(directionY*directionY+directionX*directionX);
directionX /= normalisingConstant;
directionY /= normalisingConstant;
setImage(Config.BULLET_IMAGE);
}
public double getDegree() {
return degree;
}
public double calculateCurrentPositionX(long currentTime){
long elapsedTime = currentTime - firedTime;
return (sourceX + directionX * elapsedTime * (speed * BULLET_SPEED_UNIT * 1e-9));
}
public double calculateCurrentPositionY(long currentTime){
long elapsedTime = currentTime - firedTime;
return (sourceY + directionY * elapsedTime * (speed * BULLET_SPEED_UNIT * 1e-9));
}
public int getDamage() {
return damage;
}
public void setDamage(int damage) {
this.damage = damage;
}
public boolean goesOutOfBound(){
double cx = getX() + Config.TILE_SIZE/2.0, cy=getY()+Config.TILE_SIZE/2.0;
return cx<0 || cx>Config.TILE_SIZE*Config.TILE_HORIZONTAL-7.5
||cy<0||cy>Config.TILE_SIZE*Config.TILE_VERTICAL-7.5;
}
}
<file_sep>๏ปฟ# Tower Defense
- Project Bร i tแบญp lแปn mรดn Lแบญp trรฌnh hฦฐแปng ฤแปi tฦฐแปฃng cแปงa nhรณm CsPhAi
Nhรณm em gแปm:
- <NAME>
- <NAME>
## Tรญnh nฤng game
- **Quรขn ฤแปch**: Mแปi loแบกi quรขn ฤแปch cรณ cรกc chแป sแป khรกc nhau vแป mรกu, tแปc ฤแป di chuyแปn vร phแบงn thฦฐแปng (tiแปn) nhแบญn ฤฦฐแปฃc khi bแป tiรชu diแปt. Cรณ 4 loแบกi quรขn ฤแปch:
- Normal Enemy: Quรขn ฤแปch thฦฐแปng
- Tanker Enemy: Quรขn ฤแปch nhiแปu mรกu, di chuyแปn chแบญm
- Smaller Enemy: Quรขn ฤแปch cรณ kรญch thฦฐแปc nhแป, di chuyแปn nhanh.
- Boss Enemy: Quรขn ฤแปch cรณ rแบฅt nhiแปu mรกu vร di chuyแปn rแบฅt chแบญm.
- **Thรกp**: Mแปi thรกp cรณ cรกc chแป sแป khรกc nhau vแป giรก mua, sรกt thฦฐฦกng, tแบงm bแบฏn, tแปc ฤแป bแบฏn. Cรณ 3 loแบกi thรกp:
- Normal Tower: Thรกp thฦฐแปng, cรกc chแป sแป แป mแปฉc trung bรฌnh.
- Sniper Tower: Thรกp bแบฏn tแปa, bแบฏn chแบญm, tแบงm bแบฏn xa nhฦฐng sรกt thฦฐฦกng cao.
- Machine Gun Tower: Thรกp liรชn thanh, bแบฏn nhanh, tแบงm bแบฏn ngแบฏn, sรกt thฦฐฦกng thแบฅp
- **Balance**: Lฦฐแปฃng tiแปn hiแปn cรณ.
- Tiแปn dรนng ฤแป mua thรกp.
- Khi tiรชu diแปt quรขn ฤแปch, tiแปn sแบฝ ฤฦฐแปฃc tฤng lรชn tรนy vร o loแบกi quรขn ฤแปch.
- Mแบกng cแปงa ngฦฐแปi chฦกi: Khi bแบฏt ฤแบงu trรฒ chฦกi, ngฦฐแปi chฦกi sแบฝ cรณ 5 mแบกng (tฦฐฦกng แปฉng vแปi 5 trรกi tim). Mแปi lแบงn cรณ 1 quรขn ฤแปch ฤi ฤฦฐแปฃc ฤแบฟn ฤiแปm kแบฟt thรบc thรฌ ngฦฐแปi chฦกi sแบฝ mแบฅt 1 mแบกng.
- **Luแบญt chฦกi:**
- Trรฒ chฦกi cรณ 4 levels. Trรฒ chฦกi bแบฏt ฤแบงu khi ngฦฐแปi chฦกi nhแบฅn vร o nรบt *Start*.
- Ngฦฐแปi chฦกi tiแบฟn hร nh ฤแบทt thรกp vร o bแบฃn ฤแป ฤแป thรกp tiรชu diแปt quรขn ฤแปch.
- Ngฦฐแปi chฦกi sแบฝ chiแบฟn thแบฏng khi vฦฐแปฃt qua cแบฃ 4 levels.
- Ngฦฐแปi chฦกi sแบฝ thua cuแปc nแบฟu sแป mแบกng cแปงa ngฦฐแปi chฦกi giแบฃm xuแปng 0.
## Coding
**When the game starts up:**
- The main method creates a stage (scene), a GameController object and a GUIBuilder object.
- The GameController loads the map data and passes it into the GameField object.
- After the map is loaded, the GameField randomly spawns bushes, pebbles, etc. into itself (for decoration :P).
- The GUIBuilder sets up the UI elements displayed on the stage.
- The player then interacts with the stage.
**After pressing Start:**
- The GameController loads the data for the corresponding level, which includes the map and the number of enemies of each type.
- The GameController manages the game loop and controls every event that takes place (a skeleton of this loop is sketched right after this list), such as:
  - Tower cooldown timing
  - Checking collisions between Bullets and Enemies, reducing Enemy hit points and cleaning up entities that have disappeared or died
  - Tracking changes to the player's score, money and lives
  - Checking for a win or a loss and stopping the loop in either case
  - ...
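For orientation, the per-frame ordering just described corresponds to the handlers that `GameController.handle(long)` (included in full later in this dump) calls on every frame. The skeleton below only mirrors that call order; the empty helper bodies are placeholders, not the real implementation.

```java
import javafx.animation.AnimationTimer;

// Skeleton of one frame of the game loop described above. The handler names are taken
// from the GameController class further down; the empty bodies are placeholders only.
public class GameLoopSkeleton extends AnimationTimer {
    private boolean gameOver = false;

    @Override
    public void handle(long currentNanoTime) {
        if (gameOver) stop();                     // halt the timer once the player has lost
        handleFinishLevel();                      // load the next level, or declare victory
        checkGameOverAndStop();                   // pay rewards, remove hearts, detect defeat
        removeNullEntities();                     // drop dead entities and draw the live ones
        handleEnemiesMoving(currentNanoTime);     // spawn enemies on a timer and move them
        handleTowersShooting(currentNanoTime);    // fire bullets when tower cooldowns elapse
        handleEnemiesGettingHit(currentNanoTime); // collide bullets with enemies
        handleRewardAnnouncement();               // refresh the balance label
        handleCurrentLevel();                     // refresh the level label
    }

    private void handleFinishLevel() {}
    private void checkGameOverAndStop() {}
    private void removeNullEntities() {}
    private void handleEnemiesMoving(long now) {}
    private void handleTowersShooting(long now) {}
    private void handleEnemiesGettingHit(long now) {}
    private void handleRewardAnnouncement() {}
    private void handleCurrentLevel() {}
}
```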
**Strengths**
- The game runs smoothly, without stuttering.
**Weaknesses**
- Not many features yet.
**Problems encountered while coding the game and the solutions used:**
- When a plane overtakes a tank, how do we make sure the plane is not drawn underneath the tank when rendering?
  - If the plane overtakes the tank, the plane must have been spawned after the tank. Entities are appended to the end of the entity list, so the list preserves spawn order over time. When the GameController renders, it draws the list from start to end, so whatever should end up underneath is drawn first.
- How do enemies find their path by themselves?
  - An enemy checks the 4 directions; whichever direction leads into a Road tile is the one it picks, and it then walks across that entire tile. After crossing the tile it repeats the procedure, with the constraint that the next direction chosen must not be the reverse of the previous one (no turning back). A minimal standalone sketch of this rule follows below.
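To make the direction-picking rule concrete, here is a minimal, self-contained sketch of it. It is an illustration only: it reduces the map to a plain boolean grid instead of the game's Road/Tile entities, and the names `PathPickingSketch`, `isRoad` and `pickDirection` are made up for this example, not part of the project.

```java
// Illustrative sketch of the direction-picking rule described above.
// Assumption: the map is reduced to a boolean grid where true means "Road tile".
public class PathPickingSketch {
    // Candidate directions: up, down, left, right (same order as Enemy.dx / Enemy.dy).
    private static final int[] DX = {0, 0, -1, 1};
    private static final int[] DY = {-1, 1, 0, 0};

    // Picks the next direction at a tile boundary, never reversing the previous move.
    // Returns an index into DX/DY, or -1 when no road tile is reachable.
    static int pickDirection(boolean[][] isRoad, int col, int row, int lastDx, int lastDy) {
        for (int i = 0; i < 4; i++) {
            if (DX[i] == -lastDx && DY[i] == -lastDy) continue; // no turning back
            int nc = col + DX[i];
            int nr = row + DY[i];
            boolean inside = nr >= 0 && nr < isRoad.length && nc >= 0 && nc < isRoad[0].length;
            if (inside && isRoad[nr][nc]) return i;             // first walkable direction wins
        }
        return -1;
    }

    public static void main(String[] args) {
        // A tiny map: a corridor that turns downward at column 2 and exits to the right.
        boolean[][] road = {
                {true,  true,  true,  false},
                {false, false, true,  false},
                {false, false, true,  true},
        };
        int col = 0, row = 0, lastDx = 0, lastDy = 0;
        int dir;
        while ((dir = pickDirection(road, col, row, lastDx, lastDy)) != -1) {
            lastDx = DX[dir];
            lastDy = DY[dir];
            col += lastDx;
            row += lastDy;
            System.out.println("moved to tile (" + col + ", " + row + ")");
        }
    }
}
```
In the real `Enemy.move()` the same rule is applied per pixel: the enemy's velocity remembers the previous direction, and the whole tile is crossed before the next choice is made.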
<file_sep>package mygame;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;
public class Level {
private int id;
private int numberOfNormalEnemy;
private int numberOfTankerEnemy;
private int numberOfSmallerEnemy;
private int numberOfBossEnemy;
private int enemyLeft;
private int reward;
public Level () {
this.id = 0;
enemyLeft = 0;
}
public int getEnemyLeft() {
return enemyLeft;
}
public void setEnemyLeft(int enemyLeft) {
this.enemyLeft = enemyLeft;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public int getNumberOfNormalEnemy() {
return numberOfNormalEnemy;
}
public void setNumberOfNormalEnemy(int numberOfNormalEnemy) {
this.numberOfNormalEnemy = numberOfNormalEnemy;
}
public int getNumberOfTankerEnemy() {
return numberOfTankerEnemy;
}
public void setNumberOfTankerEnemy(int numberOfTankerEnemy) {
this.numberOfTankerEnemy = numberOfTankerEnemy;
}
public int getNumberOfSmallerEnemy() {
return numberOfSmallerEnemy;
}
public void setNumberOfSmallerEnemy(int numberOfSmallerEnemy) {
this.numberOfSmallerEnemy = numberOfSmallerEnemy;
}
public int getNumberOfBossEnemy() {
return numberOfBossEnemy;
}
public void setNumberOfBossEnemy(int numberOfBossEnemy) {
this.numberOfBossEnemy = numberOfBossEnemy;
}
public int getReward() {
return reward;
}
public void setReward(int reward) {
this.reward = reward;
}
public void loadNextLevel() throws FileNotFoundException {
setId(getId() + 1);
System.out.println(id);
Scanner scanner = new Scanner(new File("src/stage/stage" + String.valueOf(getId()) + ".txt"));
numberOfNormalEnemy = scanner.nextInt();
numberOfTankerEnemy = scanner.nextInt();
numberOfSmallerEnemy = scanner.nextInt();
numberOfBossEnemy = scanner.nextInt();
enemyLeft = numberOfBossEnemy + numberOfSmallerEnemy + numberOfTankerEnemy + numberOfNormalEnemy;
}
public boolean createdAllEnemy() {
return numberOfBossEnemy == 0 && numberOfNormalEnemy == 0 && numberOfSmallerEnemy == 0 && numberOfTankerEnemy == 0;
}
public boolean isFinished() {
return enemyLeft == 0;
}
}
<file_sep>package mygame;
import javafx.animation.AnimationTimer;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.ImageView;
import javafx.scene.text.Text;
import mygame.enemy.*;
import mygame.tile.tower.Tower;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
public class GameController extends AnimationTimer{
private GameField field;
private GraphicsContext gc;
private Player player = new Player();
private GUIBuilder gui;
private final long startNanoTime = System.nanoTime();
private long lastEnemyGenerationTime = 0;
private boolean started = false;
private boolean gameOver = false;
private Level level = new Level();
public boolean isStarted() {
return started;
}
public void setStarted(boolean started) {
this.started = started;
}
public boolean isGameOver() {
return gameOver;
}
public void setGameOver(boolean gameOver) {
this.gameOver = gameOver;
}
public GameController(GraphicsContext gc) throws FileNotFoundException {
this.field = new GameField(GameStage.load("src/stage/map.txt"));
this.gc = gc;
}
public GUIBuilder getGui() {
return gui;
}
public void setGui(GUIBuilder gui) {
this.gui = gui;
}
public Player getPlayer() {
return player;
}
public void setPlayer(Player player) {
this.player = player;
}
public GameField getField() {
return field;
}
public void setField(GameField field) {
this.field = field;
}
private void removeNullEntities(){
        Iterator<Entity> itr = field.getEntities().iterator();
        while (itr.hasNext()) {
            Entity e = itr.next();
if (!e.isAlive()) {
if (e instanceof Enemy) level.setEnemyLeft(level.getEnemyLeft() - 1 );
itr.remove();
}
else
e.draw(gc);
}
}
private void checkGameOverAndStop(){
for (Entity e : field.getEntities())
if (e instanceof Enemy && !e.isAlive()) {
if (((Enemy) e).getHitPoint() <= 0)
player.setReward(player.getReward() + ((Enemy) e).getReward());
else {
if (player.getRemainingHearts() > 0) {
ImageView[] newHeartsStatus = gui.getHearts();
newHeartsStatus[player.getRemainingHearts() - 1].setImage(Config.HEART_DEAD_IMAGE);
gui.setHearts(newHeartsStatus);
player.setRemainingHearts(player.getRemainingHearts() - 1);
}
if (player.getRemainingHearts() == 0) {
gui.gameOver();
setGameOver(true);
}
}
}
}
private void handleEnemiesMoving(long currentNanoTime){
if (!started) return;
//ENEMY CREATING
if (currentNanoTime - lastEnemyGenerationTime >= Config.elapsedTimeBetweenEnemy[level.getId()] && !level.createdAllEnemy()){
boolean createdOne = false;
while (!createdOne) {
int x = (int)(Math.random() * 4);
if (x == 0 && level.getNumberOfNormalEnemy() > 0) {
field.addEntity(new NormalEnemy(field.getSpawnerX(), field.getSpawnerY(), field));
createdOne = true;
level.setNumberOfNormalEnemy(level.getNumberOfNormalEnemy() - 1);
}
if (x == 1 && level.getNumberOfTankerEnemy() > 0) {
field.addEntity(new TankerEnemy(field.getSpawnerX(), field.getSpawnerY(), field));
createdOne = true;
level.setNumberOfTankerEnemy(level.getNumberOfTankerEnemy() - 1);
}
// once SmallerEnemy and BossEnemy are implemented, remove this comment and it will run
if (x == 2 && level.getNumberOfSmallerEnemy() > 0) {
field.addEntity(new SmallerEnemy(field.getSpawnerX(), field.getSpawnerY(), field));
createdOne = true;
level.setNumberOfSmallerEnemy(level.getNumberOfSmallerEnemy() - 1);
}
if (x == 3 && level.getNumberOfBossEnemy() > 0) {
field.addEntity(new BossEnemy(field.getSpawnerX(), field.getSpawnerY(), field));
createdOne = true;
level.setNumberOfBossEnemy(level.getNumberOfBossEnemy() - 1);
}
}
lastEnemyGenerationTime = currentNanoTime;
}
//ENEMY MOVING
for(Entity e : field.getEntities()){
if (e instanceof Enemy)
((Enemy) e).move();
}
}
private void handleTowersShooting(long currentNanoTime){
//TOWER MOVING
List<Bullet> createdBullet = new ArrayList<Bullet>();
for (Entity t : field.getEntities()) {
if (t instanceof Tower)
//if it's time to fire
if ( currentNanoTime - ((Tower) t).getLastBulletGenerationTime() > (long) 5e8 / ((Tower) t).getAttackSpeed()) {
((Tower) t).setLastBulletGenerationTime(currentNanoTime);
//find nearest enemy
Enemy nearestEnemy = null; //non-exist Enemy
for (Entity e : field.getEntities()) {
if (e instanceof Enemy && ((Enemy)e).isAlive())
if (nearestEnemy == null ||
(t.distance(e) <= ((Tower) t).getAttackRange() && t.distance(e) < t.distance(nearestEnemy)))
nearestEnemy = (Enemy) e;
}
if (nearestEnemy == null) continue;
if (t.distance(nearestEnemy) > ((Tower) t).getAttackRange()) continue;
//create a bullet
Bullet tmp = ((Tower)t).fire(nearestEnemy, currentNanoTime);
createdBullet.add(tmp);
}
}
for (Bullet b : createdBullet) {
field.getEntities().add(b);
}
}
private void handleEnemiesGettingHit(long currentNanoTime){
for(Entity e : field.getEntities( )) {
if (e.isAlive()) {
if (e instanceof Enemy) {
((Enemy) e).checkHitByBulletAndRemove(currentNanoTime);
}
if (e instanceof Bullet) {
e.setX((int) ((Bullet) e).calculateCurrentPositionX(currentNanoTime));
e.setY((int) ((Bullet) e).calculateCurrentPositionY(currentNanoTime));
if (((Bullet) e).goesOutOfBound())
e.setAlive(false);
}
}
}
}
private void handleRewardAnnouncement(){
gui.getRewardAnnouncement().setText("BALANCE: " + String.valueOf(player.getReward()) + "$");
}
private void handleCurrentLevel() {
gui.getLevelAnnouncement().setText("LEVEL: " + String.valueOf(level.getId()));
}
private void handleFinishLevel() throws FileNotFoundException {
if (level.isFinished()) {
if (level.getId() == Config.maximumLevels) {
gui.victory();
this.stop();
}
else
level.loadNextLevel();
}
//Stage Finish Announcement
//gui.stageFinishAnnouncement();
//Next Stage Handler
}
@Override
public void handle(long currentNanoTime) {
if (gameOver) this.stop();
try {
this.handleFinishLevel();
} catch (FileNotFoundException e) {
e.printStackTrace();
}
this.checkGameOverAndStop();
this.removeNullEntities();
this.handleEnemiesMoving(currentNanoTime);
this.handleTowersShooting(currentNanoTime);
this.handleEnemiesGettingHit(currentNanoTime);
this.handleRewardAnnouncement();
this.handleCurrentLevel();
}
@Override
public void start() {
super.start();
}
}
<file_sep>package mygame.tile.tower;
import javafx.scene.SnapshotParameters;
import javafx.scene.image.ImageView;
import javafx.scene.paint.Color;
import mygame.Config;
public class SniperTower extends Tower {
public SniperTower (int x, int y) {
super(x, y);
setImage(Config.TOWER_SNIPER_IMAGE);
setDamage(Config.TOWER_SNIPER_DAMAGE);
setAttackRange(Config.TOWER_SNIPER_ATTACK_RANGE);
setAttackSpeed(Config.TOWER_SNIPER_ATTACK_SPEED);
setPrice(Config.TOWER_SNIPER_PRICE);
ImageView iv = new ImageView(super.getStraightImage());
iv.setRotate(90);
SnapshotParameters params = new SnapshotParameters();
params.setFill(Color.TRANSPARENT);
super.setImage(iv.snapshot(params, null));
}
}
<file_sep>package mygame;
import javafx.scene.image.Image;
public class Config {
public static final String GAME_NAME = "TowerDefense by <NAME>";
public static final int TILE_SIZE = 64;
public static final int TILE_HORIZONTAL = 11;
public static final int TILE_VERTICAL = 10;
public static final int SCREEN_WIDTH = TILE_SIZE * (TILE_HORIZONTAL + 5);
public static final int SCREEN_HEIGHT = TILE_SIZE * TILE_VERTICAL;
//TILE
public static final Image ROAD_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile172.png");
public static final Image MOUNTAIN_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile231.png");
public static final Image SPAWNER_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile172.png");
public static final Image TARGET_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile059.png");
public static final Image TOWER_BASE_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile181.png");
public static final Image ENEMY_NORMAL = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile270.png");
public static final Image ENEMY_NORMAL_SHADOW = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile293.png");
public static final Image ENEMY_BOSS = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile271.png");
public static final Image ENEMY_BOSS_SHADOW = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile294.png");
public static final int RANDOM_ENTITIES_NUM = 30;
public static final Image BUSH_IMAGE_1 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile131.png");
public static final Image BUSH_IMAGE_2 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile132.png");
public static final Image STONE_IMAGE_1 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile135.png");
public static final Image STONE_IMAGE_2 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile136.png");
public static final Image DUST_IMAGE_1 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile019.png");
public static final Image DUST_IMAGE_2 = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile020.png");
public static final Image ENEMY_TANKER_GUN = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile292.png");
public static final Image ENEMY_TANKER_BASE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile268.png");
public static final Image ENEMY_SMALLER = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile245.png");
public static final Image TOWER_NORMAL_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile226.png");
public static final double TOWER_NORMAL_ATTACK_SPEED = 0.66;
public static final int TOWER_NORMAL_ATTACK_RANGE = 3 * TILE_SIZE;
public static final int TOWER_NORMAL_DAMAGE = 2;
public static final int TOWER_NORMAL_PRICE = 25;
public static final Image TOWER_MACHINE_GUN_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile250.png");
public static final double TOWER_MACHINE_GUN_ATTACK_SPEED = 2;
public static final int TOWER_MACHINE_GUN_ATTACK_RANGE = 2 * TILE_SIZE;
public static final int TOWER_MACHINE_GUN_DAMAGE = 1;
public static final int TOWER_MACHINE_GUN_PRICE = 50;
public static final Image TOWER_SNIPER_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile249.png");
public static final double TOWER_SNIPER_ATTACK_SPEED = 0.15;
public static final int TOWER_SNIPER_ATTACK_RANGE = 5 * TILE_SIZE;
public static final int TOWER_SNIPER_DAMAGE = 5;
public static final int TOWER_SNIPER_PRICE = 35;
//BULLET
public static final Image BULLET_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Default size/towerDefense_tile272.png");
//GUI -- TOWER INFO
public static final int TOWER_INFO_DAMAGE_X = TILE_HORIZONTAL * TILE_SIZE + 25;
public static final int TOWER_INFO_DAMAGE_Y = 0 * TILE_SIZE + 15;
public static final int TOWER_INFO_PRICE_X = TILE_HORIZONTAL * TILE_SIZE + 25;
public static final int TOWER_INFO_PRICE_Y = 0 * TILE_SIZE + 40;
public static final int TOWER_INFO_ATTACK_SPEED_X = (TILE_HORIZONTAL + 2) * TILE_SIZE + 25;
public static final int TOWER_INFO_ATTACK_SPEED_Y = 0 * TILE_SIZE + 15;
public static final int TOWER_INFO_ATTACK_RANGE_X = (TILE_HORIZONTAL + 2) * TILE_SIZE + 25;
public static final int TOWER_INFO_ATTACK_RANGE_Y = 0 * TILE_SIZE + 40;
//HEART
public static final Image HEART_ALIVE_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Retina/towerDefense_tile300.png");
public static final Image HEART_DEAD_IMAGE = new Image("file:src/resources/AssetsKit_2/PNG/Retina/towerDefense_tile301.png");
public static final int HEART_VERTICAL_POSITION = 4 * TILE_SIZE + 16;
public static final int HEART_HORIZONTAL_POSITION = TILE_HORIZONTAL * TILE_SIZE + 16;
//BALANCE
public static final int BALANCE_HORIZONTAL_POSITION = (TILE_HORIZONTAL + 1) * TILE_SIZE + 32;
public static final int BALANCE_VERTICAL_POSITION = 3 * TILE_SIZE;
//LEVEL
public static final int LEVEL_HORIZONTAL_POSITION = (TILE_HORIZONTAL + 1) * TILE_SIZE + 48;
public static final int LEVEL_VERTICAL_POSITION = 6 * TILE_SIZE;
public static final int maximumLevels = 4;
//PLAYER
public static final int maximumHearts = 5;
public static final int initialBalance = 100;
//BUTTON
public static final int START_BUTTON_LAYOUT_X = (TILE_HORIZONTAL + 2) * TILE_SIZE;
public static final int START_BUTTON_LAYOUT_Y = (TILE_VERTICAL - 1) * TILE_SIZE;
//ELAPSED TIME BETWEEN ENEMY IN EACH LEVEL
public static final long elapsedTimeBetweenEnemy[] = {(long)4e9, (long)3e9, (long)2e9, (long)15e8, (long)1e9, (long)5e8};
}
<file_sep>package mygame.enemy;
import javafx.scene.image.Image;
import mygame.Config;
import mygame.GameField;
public class SmallerEnemy extends Enemy {
public SmallerEnemy() {}
public SmallerEnemy(int x, int y, GameField field) {
super(x, y, field);
super.setMaximumHitPoint(10);
super.setHitPoint(10);
super.setSpeed(4);
setImage(Config.ENEMY_SMALLER);
setReward(10);
}
}
<file_sep>package mygame.tile;
import mygame.Config;
public class Target extends Road {
public Target(int x, int y) {
super(x, y);
setImage(Config.TARGET_IMAGE);
}
}
<file_sep>package mygame.tile.tower;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.Image;
import mygame.Bullet;
import mygame.Config;
import mygame.enemy.Enemy;
import mygame.tile.Tile;
public class Tower extends Tile {
private double attackSpeed;
private int attackRange;
private int damage = 1;
private long lastBulletGenerationTime;
private int price;
private Image base;
public Tower() {}
public Tower(int x, int y) {
super(x, y);
lastBulletGenerationTime = 0;
this.base = Config.TOWER_BASE_IMAGE;
}
public int getPrice() {
return price;
}
public Image getBase() {
return base;
}
public void setBase(Image base) {
this.base = base;
}
public void setPrice(int price) {
this.price = price;
}
public double getAttackSpeed() {
return attackSpeed;
}
public void setAttackSpeed(double attackSpeed) {
this.attackSpeed = attackSpeed;
}
public int getAttackRange() {
return attackRange;
}
public void setAttackRange(int attackRange) {
this.attackRange = attackRange;
}
public int getDamage() {
return damage;
}
public void setDamage(int damage) {
this.damage = damage;
}
public long getLastBulletGenerationTime() {
return lastBulletGenerationTime;
}
public void setLastBulletGenerationTime(long lastBulletGenerationTime) {
this.lastBulletGenerationTime = lastBulletGenerationTime;
}
// Bullet tmp = new Bullet(t.getX(), t.getY(), (Tower) t, nearestEnemy, currentNanoTime);
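// Creates a bullet aimed at the given enemy and turns the tower to face it.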
public Bullet fire(Enemy target, long currentNanoTime){
Bullet shot = new Bullet(super.getX(),super.getY(),this,target,currentNanoTime);
System.out.println(shot.getDegree());
super.setDirection(shot.getDegree());
return shot;
}
public void draw(GraphicsContext gc){
gc.drawImage(base, getX(), getY());
super.draw(gc);
}
}
|
1cf75f5cb6891e7d268e95ed0021c6dcf228fc0a
|
[
"Markdown",
"Java"
] | 13 |
Java
|
unik00/TowerDefense
|
2ab795ae9c7f49d02c2e00718cc37d67d159ff53
|
53bef50553d1b5f1105077d58846498a727beb39
|
refs/heads/main
|
<file_sep>import { ษตDomAdapter } from '@angular/common';
import { Component } from '@angular/core';
import { AlertController } from '@ionic/angular';
@Component({
selector: 'app-tab4',
templateUrl: 'tab4.page.html',
styleUrls: ['tab4.page.scss']
})
export class Tab4Page {
calculate1(){this.presentAlert1();}
calculate2(){this.presentAlert2();}
calculate3(){this.presentAlert3();}
calculate4(){this.presentAlert4();}
calculate5(){this.presentAlert5();}
calculate6(){this.presentAlert6();}
calculate7(){this.presentAlert7();}
calculate8(){this.presentAlert8();}
calculate9(){this.presentAlert9();}
calculate10(){this.presentAlert10();}
calculate11(){this.presentAlert11();}
calculate12(){this.presentAlert12();}
calculate13(){this.presentAlert13();}
calculate14(){this.presentAlert14();}
calculate15(){this.presentAlert15();}
calculate16(){this.presentAlert16();}
calculate17(){this.presentAlert17();}
async presentAlert1() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Folic acid reduces your baby\'s risk of neural tube defects to almost nil. Vitamin D helps your baby develop healthy bones, teeth and muscles.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert2() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Being sedentary (sitting down a lot) is not healthy for you or your baby. The recommended amount of activity for pregnant women is 30 minutes a day four times a week.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert3() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Some foods carry a small risk of infections, such as toxoplasmosis or listeriosis. Others can give you food poisoning, such as salmonella. Others have too much vitamin A or mercury, which can harm your developing baby.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert4() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Your baby\'s movements are a sign that they are well. Start monitoring movements at around week 24 and if you notice a reduction in movement seek help immediately.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert5() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Going to sleep on your back in the third trimester doubles your risk of stillbirth. You may wake up in all sorts of positions during the night, and that\'s OK, but if you go to sleep on your side you will be sleeping safely for your baby.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert6() {
const alert = await this.alertController.create({
header: 'Info',
message: '1 in 10 women suffer mental ill-health when they are pregnant. Don\'t write off negative, unusual or unexpected bad feelings as part of your pregnancy.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert7() {
const alert = await this.alertController.create({
header: 'Info',
message: 'In pregnancy you will be offered some vaccinations, such as whooping cough vaccine or flu vaccine.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert8() {
const alert = await this.alertController.create({
header: 'Info',
message: 'It is recommended to carry your antenatal notes everywhere you go as they contain all your medical and pregnancy history.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert9() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Women have said that the best time in pregnancy for overseas holidays is the middle of pregnancy. Nausea and tiredness are common in the first 12 weeks of pregnancy, and the risk of miscarriage is also higher in the first three months (this is not linked to travelling). Travelling in the final months of pregnancy can be tiring and uncomfortable.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert10() {
const alert = await this.alertController.create({
header: 'Info',
message: 'There are some symptoms that should always be checked with a midwife or doctor as they could be a sign that the baby is unwell: bleeding from the vagina, painful urination, sharp or continuing abdominal pain or cramps, persistent or severe headache, swelling in face, hands, or legs, blurred vision, itching, baby movement slowing down, excessive or smelly vaginal discharge',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert11() {
const alert = await this.alertController.create({
header: 'Info',
message: 'There\'s no need for extra calories in the first or second trimesters. In the third trimester, you should have an extra 200 calories if you are active.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert12() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Smoking is a major modifiable risk factor (something you can change) for all sorts of health problems for your baby. It\'s never too late to stop.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert13() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Don\'t drink alcohol, especially in the first trimester when the baby\'s brain is going through a period of intense development.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert14() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Cocaine, methamphetamines, cannabis, psychoactive substances (so called "legal highs") are all likely to increase risks of health problems.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert15() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Most exercise is safe and healthy, but a handful of activities could cause injury to the baby.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert16() {
const alert = await this.alertController.create({
header: 'Info',
message: 'More than 60% of women who checked their caffeine intake on our caffeine calculator were surprised to find that they were over the limit. High levels of caffeine during pregnancy can result in low birth weight babies, which can increase the risk of health problems in later life.',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert17() {
const alert = await this.alertController.create({
header: 'Info',
message: 'Cutting out food groups may deprive your baby of nutrients they need for growth. Instead of dieting, it is best to have a healthy balanced diet.',
buttons: ['Okay']
});
await alert.present();
}
constructor(public alertController: AlertController) {
}
}
<file_sep>import { ษตDomAdapter } from '@angular/common';
import { Component } from '@angular/core';
import { AlertController } from '@ionic/angular';
@Component({
selector: 'app-tab1',
templateUrl: 'tab1.page.html',
styleUrls: ['tab1.page.scss']
})
export class Tab1Page {
num1: number = 28;
from: Date = null;
to: Date = null;
edd: Date = null;
conceive: Date = null;
pregnancytest: Date = null;
babyheart: Date = null;
morningsickness: Date = null;
appointment: Date = null;
second: Date = null;
organ: Date = null;
heart: Date = null;
ultrasound: Date = null;
third: Date = null;
week: number;
days: number;
left: number;
percent: string;
clear(){
this.from = null;
this.to = null;
this.num1 = 28;
this.edd = null;
}
calculate(){
if (this.from == null || this.to == null || this.num1 == null){
this.presentAlert();
}else{
var todate = new Date(this.to);
var fromdate = new Date(this.from);
if (todate > fromdate){
this.presentAlert2();
} else if (this.num1 < 22 || this.num1 > 44) {
this.presentAlert3();
} else{
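//estimated due date = LMP + 280 days, adjusted by (cycle length - 28)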
var newdate = new Date(this.to);
var modifier = 280 + (this.num1 - 28);
newdate.setDate(newdate.getDate()+modifier);
this.edd = newdate;
//Measure gestational age
var todate = new Date(this.from);
var fromdate = new Date(this.to);
var timediff = todate.getTime()-fromdate.getTime();
var difference_in_days = Math.floor(timediff/(1000*3600*24));
var difference_in_weeks1 = difference_in_days/7;
if (difference_in_weeks1>0){
difference_in_weeks1 = Math.floor(difference_in_weeks1);
}else{
difference_in_weeks1 = Math.ceil(difference_in_weeks1);
}
var difference_in_weeks2 = Math.floor(difference_in_days - (7*difference_in_weeks1));
this.week = difference_in_weeks1;
this.days = difference_in_weeks2;
//measure progress
var todate = new Date(this.edd);
var fromdate = new Date(this.from);
var timediff = todate.getTime()-fromdate.getTime();
var difference_in_dayss = Math.floor(timediff/(1000*3600*24))+1;
this.left = difference_in_dayss;
//measure percentage
var todate = new Date(this.edd);
var fromdate = new Date(this.to);
var timediff = todate.getTime()-fromdate.getTime();
var difference_in_daysss = Math.floor(timediff/(1000*3600*24));
var prog = (difference_in_days / difference_in_daysss)*100;
this.percent = (prog).toFixed(2);
//Milestones
var newdate = new Date(this.to);
this.conceive = newdate;
this.conceive.setDate(this.conceive.getDate()+14);
var newdate = new Date(this.to);
this.pregnancytest = newdate;
this.pregnancytest.setDate(this.pregnancytest.getDate()+23);
var newdate = new Date(this.to);
this.babyheart = newdate;
this.babyheart.setDate(this.babyheart.getDate()+35);
var newdate = new Date(this.to);
this.morningsickness = newdate;
this.morningsickness.setDate(this.morningsickness.getDate()+63);
var newdate = new Date(this.to);
this.appointment = newdate;
this.appointment.setDate(this.appointment.getDate()+70);
var newdate = new Date(this.to);
this.second = newdate;
this.second.setDate(this.second.getDate()+98);
var newdate = new Date(this.to);
this.organ = newdate;
this.organ.setDate(this.organ.getDate()+140);
var newdate = new Date(this.to);
this.heart = newdate;
this.heart.setDate(this.heart.getDate()+142);
var newdate = new Date(this.to);
this.ultrasound = newdate;
this.ultrasound.setDate(this.ultrasound.getDate()+147);
var newdate = new Date(this.to);
this.third = newdate;
this.third.setDate(this.third.getDate()+196);
}
}
}
async presentAlert() {
const alert = await this.alertController.create({
header: 'Missing Value',
message: 'Please input all the required values!',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert2() {
const alert = await this.alertController.create({
header: 'Wrong Value',
message: 'First day of LMP must be before today date!',
buttons: ['Okay']
});
await alert.present();
}
async presentAlert3() {
const alert = await this.alertController.create({
header: 'Wrong Value',
message: 'Average cycles length must be between 22 to 44 days!',
buttons: ['Okay']
});
await alert.present();
}
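// Rounding helper: formats a value as a short string with a limited number of significant digits.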
roundoff(value){
var value4 = "" + Math.round(value);
var bonus2 = value4.length + 1;
var bonus = 0;
if (value < 100){bonus=bonus+1};
if (value < 10){bonus=bonus+1};
if (value < 1){bonus=bonus+1};
if (value < 0.1){bonus=bonus+1};
if (value < 0.01){bonus=bonus+1};
if (value < 0.001){bonus=bonus+1};
if (value < 0.0001){bonus=bonus+1};
bonus2 = bonus2+bonus;
var whole = Math.round(value * Math.pow(10, bonus));
var whole2 = "" + whole * Math.pow(10, -1*bonus);
var whole3 = whole2.substr(0,bonus2);
return whole3;
}
constructor(public alertController: AlertController) {
}
}
<file_sep># pregnancy-due-dates
<file_sep>import { ษตDomAdapter } from '@angular/common';
import { Component } from '@angular/core';
import { AlertController } from '@ionic/angular';
@Component({
selector: 'app-tab3',
templateUrl: 'tab3.page.html',
styleUrls: ['tab3.page.scss']
})
export class Tab3Page {
num: number;
week: number;
days: number;
left: number;
percent: string;
from: Date = null;
edd: Date = null;
selection: string;
eq1: string = 'true';
eq2: string;
eq3: string;
clear(){
this.from = null;
this.num = null;
this.edd = null;
}
calculate(){
if (this.from == null || this.num == null){
this.presentAlert();
}else{
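//three alternative regression formulas: estimate gestational age (in weeks) from the entered measurement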
if (this.eq1 == "true"){
var weeks = 5.2867 + (0.1584 * this.num) - (0.0007 * this.num * this.num);
} else if (this.eq2 == "true"){
var weeks = Math.floor((2 * this.num + 44.2))/7;
} else if (this.eq3 == "true"){
var a = (1.854 + (0.010451 * this.num) - (0.000029919 * this.num * this.num) + 0.000000043156 * this.num * this.num * this.num);
var e = 2.718281828459045235360287471352;
var weeks = Math.pow(e, a);
}
var total_days = Math.ceil(weeks * 7);
var days_left = 280 - total_days;
var newdate = new Date(this.from);
newdate.setDate(newdate.getDate()+days_left);
this.edd = newdate;
//measure gestational age
var difference_in_days = total_days;
var difference_in_weeks1 = difference_in_days/7;
if (difference_in_weeks1>0){
difference_in_weeks1 = Math.floor(difference_in_weeks1);
}else{
difference_in_weeks1 = Math.ceil(difference_in_weeks1);
}
var difference_in_weeks2 = Math.floor(difference_in_days - (7*difference_in_weeks1));
this.week = difference_in_weeks1;
this.days = difference_in_weeks2;
//measure progress
var difference_in_dayss = 280-total_days;
this.left = difference_in_dayss;
//measure percentage
var prog = (total_days / 280)*100;
this.percent = (prog).toFixed(2);
}
}
async presentAlert() {
const alert = await this.alertController.create({
header: 'Missing Value',
message: 'Please input all the required values!',
buttons: ['Okay']
});
await alert.present();
}
check(event) {
console.log('radioGroupChange',event.detail);
this.selection = event.target.value;
if (this.selection == 'eq1'){
this.eq1 = "true";
this.eq2 = null;
this.eq3 = null;
} else if (this.selection == 'eq2'){
this.eq2 = "true";
this.eq1 = null;
this.eq3 = null;
} else if (this.selection == 'eq3'){
this.eq3 = "true";
this.eq2 = null;
this.eq1 = null;
}
}
constructor(public alertController: AlertController) {
}
}
|
23c75bb7b596e2cef8a6de3df2df72b6b8caa254
|
[
"Markdown",
"TypeScript"
] | 4 |
TypeScript
|
anggariskyraharja/pregnancy-due-dates
|
785a5f58bc69b1d7b5f14883786a54673f0dd39b
|
81bed6bae8b48c3fec14045e043a125dfb318e6b
|
refs/heads/master
|
<repo_name>sfreeman422/SpotifySocial<file_sep>/controllers/spotifyControl.js
// This is the code to authenticate against Spotify accounts, and to pull a Spotify user's top artists.
var models = require('../models');
var express = require('express');
var request = require('request');
var querystring = require('querystring');
var cookieParser = require('cookie-parser');
var users = require('../models')["Users"];
// users.sync({force: true});
// These are our team's Spotify app credentials. These should be kept secret by not putting the server.js file in the public folder.
var client_id = '7e460edc49e64d138a8f87bd87cfdc1c';
var client_secret = '<KEY>';
var redirect_uri = 'http://localhost:3000/profile/callback';
// setting up global variables for user profile info. Each of these comes from a user's Spotify account info.
var userID = "";
var userName = "";
var userEmail = "";
var favArtists = [];
// Generates a random string containing numbers and letters
// * @param {number} length The length of the string
// * @return {string} The generated string
var generateRandomString = function(length) {
var text = '';
var possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
for (var i = 0; i < length; i++) {
text += possible.charAt(Math.floor(Math.random() * possible.length));
}
return text;
};
var stateKey = 'spotify_auth_state';
var app = express();
app.use(express.static(__dirname + '/public'))
.use(cookieParser());
app.get('/', function(req, res){
res.redirect('/seatgeek/getconcerts/'+userID);
});
app.get('/login', function(req, res) {
var state = generateRandomString(16);
// create a cookie to store the user's Spotify auth state
res.cookie(stateKey, state);
// set scope for Spotify user info -- this will determine what info is available to our app
var scope = 'user-read-private user-read-birthdate user-read-email user-top-read';
// request authorization
res.redirect('https://accounts.spotify.com/authorize?' +
querystring.stringify({
response_type: 'code',
client_id: client_id,
scope: scope,
redirect_uri: redirect_uri,
state: state
}));
});
// callback route to follow the authentication
app.get('/callback', function(req, res) {
// request refresh and access tokens
// the response sends back a code -- this code will be exchanged for an access token in the POST request
var code = req.query.code || null;
var state = req.query.state || null;
var storedState = req.cookies ? req.cookies[stateKey] : null;
// check the state parameter
if (state === null || state !== storedState) {
res.redirect('/#' +
querystring.stringify({
error: 'state_mismatch'
}));
} else {
res.clearCookie(stateKey);
var authOptions = {
url: 'https://accounts.spotify.com/api/token',
form: {
code: code,
redirect_uri: redirect_uri,
grant_type: 'authorization_code'
},
headers: {
'Authorization': 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64'))
},
json: true
};
// POST method uses the access token from the above GET request. That code is sent and an access token is received.
// The access token will be used to ping the API for user info, like favorite artists.
request.post(authOptions, function(error, response, body) {
if (!error && response.statusCode === 200) {
var access_token = body.access_token,
refresh_token = body.refresh_token;
var options = {
url: 'https://api.spotify.com/v1/me',
headers: { 'Authorization': 'Bearer ' + access_token },
json: true
};
// use the access token to access the Spotify Web API
request.get(options, function(error, response, body) {
userID = body.id;
userName = body.display_name;
// userPic = body.images[0].url;
userEmail = body.email;
console.log("user id is " + userID);
console.log("user name is "+ userName);
console.log("user's email is " + userEmail);
// console.log("user's profile pic is " + userPic);
if (!error && response.statusCode === 200) {
// use the access token to access the Spotify Web API for a user's top artists
request.get({
url: 'https://api.spotify.com/v1/me/top/artists?limit=5&offset=0&time_range=medium_term',
headers: { 'Authorization': 'Bearer ' + access_token },
json: true
}, function(error, response, body) {
for (var i = 0; i < body.items.length; i++) {
favArtists.push(body.items[i].name);
};
console.log(favArtists);
users
.create({ user_id: userID,
name: userName,
email: userEmail,
// userPic: userPic,
favArtists1: favArtists[0],
favArtists2: favArtists[1],
favArtists3: favArtists[2],
favArtists4: favArtists[3],
favArtists5: favArtists[4]
})
.then(function() {
users
.findOrCreate({where: {email: userEmail}, defaults: {user_id: userID}})
.spread(function(user, created) {
console.log(user.get({
plain: true
}))
console.log(created);
})
})
});
}
});
// redirect the user to the survey page
res.redirect('/survey.html');
}
});
}
});
// create a /matches route -- this will serve up matches.hbs after the user clicks on the "show me matches" button
app.get('/matches', function(req, res){
res.render('matches');
});
// access tokens are set to expire -- the refresh will get a new token
app.get('/refresh_token', function(req, res) {
// requesting access token from refresh token
var refresh_token = req.query.refresh_token;
var authOptions = {
url: 'https://accounts.spotify.com/api/token',
headers: { 'Authorization': 'Basic ' + (new Buffer(client_id + ':' + client_secret).toString('base64')) },
form: {
grant_type: 'refresh_token',
refresh_token: refresh_token
},
json: true
};
request.post(authOptions, function(error, response, body) {
if (!error && response.statusCode === 200) {
var access_token = body.access_token;
res.send({
'access_token': access_token
});
}
});
});
module.exports = app; <file_sep>/models/concert.js
"use strict";
module.exports = function(sequelize, DataTypes) {
var Concerts = sequelize.define("Concerts", {
user_id: DataTypes.STRING,
eventName: DataTypes.STRING,
concert_id: DataTypes.INTEGER,
eventDate: DataTypes.DATE,
venueName: DataTypes.STRING,
venueAddress: DataTypes.STRING,
artists: DataTypes.STRING,
ticketURL: DataTypes.STRING,
attendees: DataTypes.STRING
}, {
});
return Concerts;
};<file_sep>/migrations/20161123015756-users.js
"use strict";
module.exports = {
up: function (queryInterface, Sequelize) {
return queryInterface.createTable(
'Users',
{
user_id: {
type: Sequelize.STRING,
primaryKey: true
},
name: Sequelize.STRING,
email: Sequelize.STRING,
// userPic: Sequelize.STRING,
favArtists1: Sequelize.STRING,
favArtists2: Sequelize.STRING,
favArtists3: Sequelize.STRING,
favArtists4: Sequelize.STRING,
favArtists5: Sequelize.STRING
});
},
down: function (queryInterface, Sequelize) {
return queryInterface
.dropTable('Users');
}
};
<file_sep>/migrations/20161118005942-concerts.js
"use strict";
module.exports = {
up: function (queryInterface, Sequelize) {
return queryInterface.createTable(
'Concerts',
{
user_id: {
type: Sequelize.STRING,
primaryKey: true
},
concert_id: {
type: Sequelize.INTEGER
},
eventName: {
type: Sequelize.STRING
},
eventDate: {
type: Sequelize.STRING
},
venueName: {
type: Sequelize.STRING
},
//Needs to be a string, separated by commas.
venueAddress:{
type: Sequelize.STRING
},
//Needs to be a string, separated by commas.
artists: {
type: Sequelize.STRING
},
ticketURL: {
type: Sequelize.STRING
},
attendees: {
type: Sequelize.STRING
}
});
},
down: function (queryInterface, Sequelize) {
return queryInterface
.dropTable('Concerts');
}
};
<file_sep>/README.md
# SpotifySocial
<h1>Concert Connect</h1>
<h2>Project description</h2>
<p>Users must have a Spotify account they regularly use (so their top artists and playlists can be pulled) and also take a survey on our site to determine their "concert personality". Each of these results is stored as variables in SQL and used to come up with concerts users would enjoy going to, and to match them with other users with similar tastes to go with.</p>
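<p>One way the matching step could work (a rough sketch only, not code shipped in this repo; it assumes user rows shaped like the Sequelize <code>Users</code> model defined below, with <code>favArtists1</code> through <code>favArtists5</code> columns) is to count the favourite artists two users share:</p>

```js
// Hypothetical helper (not part of this repo): count the favourite artists two
// Users rows have in common, as a crude "similar taste" score.
function sharedArtists(userA, userB) {
  var pick = function (u) {
    return [u.favArtists1, u.favArtists2, u.favArtists3, u.favArtists4, u.favArtists5]
      .filter(function (a) { return Boolean(a); });
  };
  var theirs = pick(userB);
  return pick(userA).filter(function (artist) {
    return theirs.indexOf(artist) !== -1;
  });
}

// Example: returns ["Radiohead"] -- one artist in common.
sharedArtists(
  { favArtists1: 'Radiohead', favArtists2: 'Sigur Ros' },
  { favArtists1: 'Radiohead', favArtists2: 'Bonobo' }
);
```

<p>A higher overlap count would mark two users as better concert companions.</p>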
<h2>Screenshots</h2>
<p>*Include one or two screenshots of main UI/UX points of your app and how it solves a problem*</p>
<h2>Technologies used</h2>
Express<br>
mongod<br>
Utilized the Spotify and Seatgeek API<br>
MySQL and Sequelize<br>
Node.js<br>
Handlebars<br>
Heroku<br>
RSVP..Promise? (New Technology)<br>
<h2>Authors</h2>
<p>
<NAME><br>
<NAME><br>
<NAME><br>
<NAME><br>
<NAME><br>
</p>
<h2>License</h2>
<p>
This project is licensed under the MIT License - see the LICENSE.md file for details</p>
<h2>Acknowledgments</h2>
<file_sep>/server.js
//Dependencies
var express = require('express');
var path = require('path');
var favicon = require('serve-favicon');
var logger = require('morgan');
var cookieParser = require('cookie-parser');
var bodyParser = require('body-parser');
var session = require('express-session');
var methodOverride = require('method-override');
var hbs = require('express-handlebars');
var request = require('request');
var queryString = require('querystring');
var sequelize = require('sequelize');
var mysql = require('mysql');
var models = require('./models');
var port = process.env.PORT || 3000;
//Controllers
var mainControl = require('./controllers/mainControl.js');
var spotifyControl = require('./controllers/spotifyControl.js');
var profileControl = require('./controllers/profileControl.js');
var seatgeekControl = require('./controllers/seatgeekControl.js');
//Express settings
//=========================================//
var app = express();
app.use(methodOverride('_method'));
app.use(session({secret: 'spotify', cookie: {maxAge: 60000}}));
app.use(cookieParser());
//Handlebars Setup
app.set('views', path.join(__dirname, 'views'));
app.engine('handlebars', hbs({defaultLayout: 'main'}));
app.set('view engine', 'handlebars');
app.use(favicon(__dirname + '/public/images/favicon.ico'));
app.use(logger('dev'));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({extended: false}));
app.use(cookieParser());
app.use(express.static(path.join(__dirname, 'public')));
//Controller Routing
app.use('/', mainControl);
app.use('/profile', spotifyControl);
app.use('/seatgeek', seatgeekControl);
app.use('/callback', profileControl);
//Forwards errors to the Error Handler
app.use(function(req, res, next){
var err = new Error("Not found.");
err.status = 404;
next(err);
});
//Error Handler
app.use(function(err, req, res, next){
res.status(err.status || 500);
res.render('error', {
message: err.message,
error: (app.get('env') === 'development') ? err : {}
})
})
models.sequelize.sync({force: true}).then(function(){
app.listen(port, function(){
console.log("Listening on port: "+port);
})
});
module.exports = app;
|
b399357fcc377e707f226a6f1275f6dbd7e46d76
|
[
"JavaScript",
"Markdown"
] | 6 |
JavaScript
|
sfreeman422/SpotifySocial
|
88d62768a5bed5b50247b096e467c0805ed2975b
|
8f3837db230eba8dd20251dd62195a61d8a66951
|
refs/heads/master
|
<repo_name>davelyon/CTYay<file_sep>/lib/ctyay/request.rb
module Ctyay
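# Thin wrapper around the CTA Bus Tracker API; every request URL carries the configured API key.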
class Request
BASE_URL = 'http://www.ctabustracker.com/bustime/api/v1'
def initialize(action='', params='')
@request = RestClient::Resource.new(BASE_URL + action + "?key=#{Ctyay::Config.bus_api_key}" + params)
end
def get
@request.get
end
def url
@request.url
end
def self.routes
new("/getroutes").get
end
def self.directions(route)
new("/getdirections", "&rt=#{route}").get
end
def self.stops(route_id, direction)
new('/getstops', "&rt=#{route_id}&dir=#{URI.escape(direction)}").get
end
def self.time
new('/gettime').get
end
end
end
<file_sep>/spec/ctyay/routes_spec.rb
require 'spec_helper'
describe Ctyay::Route do
let!(:route_xml) { File.read("spec/data/routes.example.xml") }
let!(:directions_xml) { File.read("spec/data/directions.example.xml") }
before(:all) do
Ctyay::Config.bus_api_key = 123
FakeWeb.register_uri(:get, "http://www.ctabustracker.com/bustime/api/v1/getroutes?key=123", :body => route_xml)
FakeWeb.register_uri(:get, "http://www.ctabustracker.com/bustime/api/v1/getdirections?key=123&rt=3", :body => directions_xml)
end
context "attributes" do
subject { Ctyay::Route.new("8", "Halsted") }
describe "#short_name" do
it { subject.short_name.should == "8" }
end
describe "#common_name" do
it { subject.common_name.should == "Halsted" }
end
describe "#description" do
it { subject.description.should == "8 Halsted" }
end
describe "#directions" do
let(:route_xml) { File.read("spec/data/directions.example.xml") }
before do
Ctyay::Route.should_receive(:route).with("3").and_return Ctyay::Route.new("3", "King Drive")
end
it "returns the directions" do
Ctyay::Route.route("3").directions.should == ["North Bound", "South Bound"]
end
end
describe "#stops" do
before do
Ctyay::Stop.should_receive(:all).with("8", "East Bound").and_return([])
end
it "delegates to Stop.all" do
subject.stops("East Bound").should == []
end
end
end
context "accesing api data" do
include Ctyay
let!(:route1) { double("Route") }
let!(:route2) { double("Route") }
let!(:route3) { double("Route") }
before do
Route.should_receive(:new).with("1", "Indiana/Hyde Park").and_return(route1)
Route.should_receive(:new).with("2", "Hyde Park Express").and_return(route2)
Route.should_receive(:new).with("3", "King Drive").and_return(route3)
route1.stub(:short_name).and_return( 1 )
route2.stub(:short_name).and_return( 2 )
route3.stub(:short_name).and_return( 3 )
end
describe ".route" do
before do
end
it "returns a route" do
Route.route(1).should == route1
end
end
describe ".all" do
before do
end
it "returns a collection of routes" do
Route.all.should == [route1, route2, route3]
end
end
end
end
<file_sep>/README.md
# CTyay: A simple wrapper for the CTA Bus/Train tracker API
## Configuration
You'll need an API Key from the CTA first.
- Bus: http://www.transitchicago.com/developers/bustracker.aspx
- Train: http://www.transitchicago.com/developers/traintracker.aspx
Ctyay::Config.bus_api_key = '0118 999 881 999 119 725 3' # Use your API Key
## Basic Usage
### Get Routes:
Route.all # Get all routes (A collection of Route objects)
route = Route.route("20")
route.short_name # 'rt' element or the number of the route "8" or "X20"
route.common_name # 'rtnm' element or the common name "Madison" for 20 route
route.description # Convenience method for short + common name -> "8 Halsted"
### Route Directions:
# Routes can go in multiple directions, e.g. North/South
Routes.route("20").directions # ["North Bound", "East Bound"]
### Route Stops:
# After selecting a route and a direction, stops can be retrieved.
stops = Routes.route("20").stops(direction: "East Bound") # Returns a collection of Stops
stop = stop.first
stop.id # 4727
stop.name # "<NAME>"
stop.location # [41.881265, -87.66849]
stop.latitude # 41.881265
stop.longitude # -87.66849
<file_sep>/spec/ctyay/request_spec.rb
require 'spec_helper'
describe Ctyay::Request do
subject { Ctyay::Request }
describe "#new" do
before do
Ctyay::Config.bus_api_key = "123"
end
it "includes an API key" do
subject.new.url.should include("?key=123")
end
context "with action" do
it "returns an http request with that action" do
subject.new('route').url.should include('route')
end
end
end
end
<file_sep>/spec/spec_helper.rb
require 'ctyay'
require 'fakeweb'
RSpec.configure do |config|
config.mock_framework = :rspec
end
<file_sep>/lib/ctyay.rb
require "ctyay/version"
require "rest-client"
require "nokogiri"
module Ctyay
autoload(:CTATime, 'ctyay/time')
autoload(:Config, 'ctyay/config')
autoload(:Request, 'ctyay/request')
autoload(:Route, 'ctyay/routes')
autoload(:Stop, 'ctyay/stop')
end
<file_sep>/lib/ctyay/stop.rb
module Ctyay
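# A single bus stop parsed from the API XML: id, name and latitude/longitude.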
class Stop
attr_reader :id, :name, :latitude, :longitude
def self.all(route_id, direction)
parse Nokogiri::XML.parse Request.stops(route_id, direction)
end
def initialize(id, name, latitude, longitude)
@id, @name, @latitude, @longitude = id, name, latitude, longitude
end
def location
[latitude, longitude]
end
private
def self.parse(xml)
xml.xpath("//stop").map do |stop|
new( stop.xpath("stpid").inner_text, stop.xpath("stpnm").inner_text,
stop.xpath("lat").inner_text, stop.xpath("lon").inner_text )
end
end
end
end
<file_sep>/lib/ctyay/config.rb
module Ctyay
class Config
def self.bus_api_key= key
@@bus_api_key = key
end
def self.bus_api_key
@@bus_api_key
end
end
end
<file_sep>/spec/ctyay/stop_spec.rb
require 'spec_helper'
describe Ctyay::Stop do
let!(:stop_xml) { File.open("spec/data/stops.example.xml") }
before(:all) do
Ctyay::Config.bus_api_key = 123
FakeWeb.register_uri(:get, "http://www.ctabustracker.com/bustime/api/v1/getstops?key=123&rt=20&dir=East%20Bound", :body => stop_xml)
end
subject { Ctyay::Stop.new("1", "Lake/Halsted", "42.88842", "-37.66532") }
describe "#id" do
it { subject.id.should == "1" }
end
describe "#name" do
it { subject.name.should == "Lake/Halsted" }
end
describe "#latitude" do
it { subject.latitude.should == "42.88842" }
end
describe "#longitude" do
it { subject.longitude.should == "-37.66532" }
end
describe "#location" do
it { subject.location.should == ["42.88842", "-37.66532"] }
end
context "remote api access" do
describe ".all" do
let(:stop_1) { double("Stop") }
let(:stop_2) { double("Stop") }
before do
Ctyay::Stop.should_receive(:new).with("4727", "1633 W Madison", "41.881408012088", "-87.668516635895").and_return( stop_1 )
Ctyay::Stop.should_receive(:new).with("449", "Washington & Wabash", "41.883229232153", "-87.625987529755").and_return( stop_2 )
end
it "returns a collection of Stops" do
Ctyay::Stop.all("20", "East Bound").should == [stop_1, stop_2]
end
end
end
end
<file_sep>/ctyay.gemspec
# -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require "ctyay/version"
Gem::Specification.new do |s|
s.name = "ctyay"
s.version = Ctyay::VERSION
s.authors = ["Dave Lyon"]
s.email = ["<EMAIL>"]
s.homepage = "http://github.com/davelyon/ctyay"
s.summary = %q{API Wrapper for CTA Train/Bus Tracking}
s.description = %q{}
s.rubyforge_project = "ctyay"
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
s.require_paths = ["lib"]
# specify any dependencies here; for example:
s.add_development_dependency "rspec", "2.6.0"
s.add_development_dependency "fakeweb", "1.3.0"
s.add_runtime_dependency "rest-client", "1.6.7"
s.add_runtime_dependency "nokogiri", "1.5.0"
end
<file_sep>/spec/ctyay/time_spec.rb
require 'spec_helper'
describe Ctyay::CTATime do
describe ".time" do
let(:time_xml) { File.read("spec/data/time.example.xml") }
before do
Ctyay::Config.bus_api_key=123
FakeWeb.register_uri(:get, "http://www.ctabustracker.com/bustime/api/v1/gettime?key=123", :body => time_xml)
end
it "returns the official CTA time" do
Ctyay::CTATime.time.should == Time.new("20111212 00:00:00")
end
end
end
<file_sep>/lib/ctyay/time.rb
module Ctyay
class CTATime
def self.time
Time.new( Nokogiri::XML.parse(Request.time).xpath("//tm").inner_text )
end
end
end
<file_sep>/spec/ctyay/config_spec.rb
require 'spec_helper'
describe Ctyay::Config do
context "bus api" do
it "holds your key" do
Ctyay::Config.bus_api_key = "bus api key"
Ctyay::Config.bus_api_key.should == "bus api key"
end
end
end
<file_sep>/lib/ctyay/routes.rb
module Ctyay
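# A bus route; fetches its directions and stops from the API on demand.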
class Route
attr_reader :short_name, :common_name
def initialize(short_name, common_name)
@short_name = short_name
@common_name = common_name
end
def self.route(id)
@routes ||= all
@routes.select { |c| c.short_name == id }.first
end
def self.all
@routes = parse Nokogiri::XML.parse(Request.routes)
end
def description
[short_name, common_name].join " "
end
def directions
Nokogiri::XML.parse(Request.directions(short_name)).xpath("//dir").map(&:inner_text)
end
def stops(direction)
Stop.all(short_name, direction)
end
private
def self.parse(xml)
xml.xpath("//route").map do |route|
new( route.xpath("rt").inner_text, route.xpath("rtnm").inner_text)
end
end
end
end
|
a8502e1c8b6434d698a67e85ed9211e53d7ee30a
|
[
"Markdown",
"Ruby"
] | 14 |
Ruby
|
davelyon/CTYay
|
9197c75bd66222aa30f7cad8cfc3df6007a624a6
|
5c58cbc888d76c2de60fbac55d459a46a1f13c7e
|
refs/heads/master
|
<repo_name>0xD34F/0xD34F.github.io<file_sep>/spinners/main.js
var spinners = {
'spinner-1': {
source:
'.spinner-1 {\n\
font-size: 10px;\n\
background-color: transparent;\n\
border-radius: 50%;\n\
width: 2em;\n\
height: 2em;\n\
margin: 2em;\n\
animation: spinner-1 1s infinite ease-in-out;\n\
}\n\
@keyframes spinner-1 {\n\
0% {\n\
box-shadow: -2em -2em #000, 2em -2em #CCC, 2em 2em #000, -2em 2em #CCC;\n\
}\n\
100% {\n\
box-shadow: -2em 2em #CCC, -2em -2em #000, 2em -2em #CCC, 2em 2em #000;\n\
}\n\
}'
},
'spinner-2': {
source:
'.spinner-2 {\n\
font-size: 10px;\n\
width: 1em;\n\
height: 1em;\n\
margin: 1em 5.5em 5em 5.5em;\n\
border-radius: 50%;\n\
animation: spinner-2 1s infinite linear;\n\
background-color: transparent;\n\
transform: translateZ(0);\n\
}\n\
@keyframes spinner-2 {\n\
0%, 75%, 100% {\n\
box-shadow: -4.5em 2em 0 .5em #CCC, -1.5em 2em 0 .5em #CCC, 1.5em 2em 0 .5em #CCC, 4.5em 2em 0 .5em #CCC;\n\
}\n\
15% {\n\
box-shadow: -4.5em 2em 0 .7em #000, -1.5em 2em 0 .5em #CCC, 1.5em 2em 0 .5em #CCC, 4.5em 2em 0 .5em #CCC;\n\
}\n\
30% {\n\
box-shadow: -4.5em 2em 0 .5em #CCC, -1.5em 2em 0 .7em #000, 1.5em 2em 0 .5em #CCC, 4.5em 2em 0 .5em #CCC;\n\
}\n\
45% {\n\
box-shadow: -4.5em 2em 0 .5em #CCC, -1.5em 2em 0 .5em #CCC, 1.5em 2em 0 .7em #000, 4.5em 2em 0 .5em #CCC;\n\
}\n\
60% {\n\
box-shadow: -4.5em 2em 0 .5em #CCC, -1.5em 2em 0 .5em #CCC, 1.5em 2em 0 .5em #CCC, 4.5em 2em 0 .7em #000;\n\
}\n\
}'
},
'spinner-3': {
source:
'.spinner-3 {\n\
font-size: 10px;\n\
width: 2em;\n\
height: 2em;\n\
margin: 0 0 4em 0;\n\
background-color: transparent;\n\
border-radius: 50%;\n\
animation: spinner-3 2.5s infinite linear;\n\
}\n\
@keyframes spinner-3 {\n\
0% {\n\
box-shadow: -10.5em 2em transparent, -10.5em 2em transparent, -10.5em 2em transparent, -10.5em 2em transparent;\n\
}\n\
5% {\n\
box-shadow: -10.5em 2em transparent, -10.5em 2em transparent, -10.5em 2em transparent, -5.5em 2em #888;\n\
}\n\
10% {\n\
box-shadow: -10.5em 2em transparent, -10.5em 2em transparent, -6.5em 2em #888, -0.5em 2em;\n\
}\n\
15% {\n\
box-shadow: -10.5em 2em transparent, -7.5em 2em #888, -2.5em 2em, 4.5em 2em;\n\
}\n\
20% {\n\
box-shadow: -8.5em 2em #888, -4.5em 2em, 1.5em 2em, 4.5em 2em;\n\
}\n\
25% {\n\
box-shadow: -6.5em 2em, -1.5em 2em, 1.5em 2em, 4.5em 2em;\n\
}\n\
30%, 55% {\n\
box-shadow: -4.5em 2em, -1.5em 2em, 1.5em 2em, 4.5em 2em;\n\
}\n\
60% {\n\
box-shadow: -4.5em 2em, -1.5em 2em, 1.5em 2em, 6.5em 2em;\n\
}\n\
65% {\n\
box-shadow: -4.5em 2em, -1.5em 2em, 4.5em 2em, 8.5em 2em #888;\n\
}\n\
70% {\n\
box-shadow: -4.5em 2em, 2.5em 2em, 7.5em 2em #888, 10.5em 2em transparent;\n\
}\n\
75% {\n\
box-shadow: 0.5em 2em, 6.5em 2em #888, 10.5em 2em transparent, 10.5em 2em transparent;\n\
}\n\
80% {\n\
box-shadow: 5.5em 2em #888, 10.5em 2em transparent, 10.5em 2em transparent, 10.5em 2em transparent;\n\
}\n\
85%, 100% {\n\
box-shadow: 10.5em 2em transparent, 10.5em 2em transparent, 10.5em 2em transparent, 10.5em 2em transparent;\n\
}\n\
}'
},
'spinner-4': {
source:
'.spinner-4 {\n\
font-size: 10px;\n\
width: 1em;\n\
height: 1em;\n\
margin: 0 0 4em 0;\n\
border-radius: 50%;\n\
animation: spinner-4 1.5s infinite linear;\n\
background-color: transparent;\n\
transform: translateZ(0);\n\
}\n\
@keyframes spinner-4 {\n\
0%, 90%, 100% {\n\
box-shadow: -4.5em 2em 0 -.5em, -1.5em 2em 0 -.5em, 1.5em 2em 0 -.5em, 4.5em 2em 0 -.5em;\n\
}\n\
10% {\n\
box-shadow: -4.5em 2em 0 0em, -1.5em 2em 0 -.5em, 1.5em 2em 0 -.5em, 4.5em 2em 0 -.5em;\n\
}\n\
20% {\n\
box-shadow: -4.5em 2em 0 .5em, -1.5em 2em 0 0em, 1.5em 2em 0 -.5em, 4.5em 2em 0 -.5em;\n\
}\n\
30% {\n\
box-shadow: -4.5em 2em 0 1em, -1.5em 2em 0 .5em, 1.5em 2em 0 0em, 4.5em 2em 0 -.5em;\n\
}\n\
40% {\n\
box-shadow: -4.5em 2em 0 .5em, -1.5em 2em 0 1em, 1.5em 2em 0 .5em, 4.5em 2em 0 0em;\n\
}\n\
50% {\n\
box-shadow: -4.5em 2em 0 0em, -1.5em 2em 0 .5em, 1.5em 2em 0 1em, 4.5em 2em 0 .5em;\n\
}\n\
60% {\n\
box-shadow: -4.5em 2em 0 -.5em, -1.5em 2em 0 0em, 1.5em 2em 0 .5em, 4.5em 2em 0 1em;\n\
}\n\
70% {\n\
box-shadow: -4.5em 2em 0 -.5em, -1.5em 2em 0 -.5em, 1.5em 2em 0 0em, 4.5em 2em 0 .5em;\n\
}\n\
80% {\n\
box-shadow: -4.5em 2em 0 -.5em, -1.5em 2em 0 -.5em, 1.5em 2em 0 -.5em, 4.5em 2em 0 0em;\n\
}\n\
}'
},
'spinner-5': {
source:
'.spinner-5 {\n\
font-size: 10px;\n\
margin: 2em;\n\
position: relative;\n\
}\n\
.spinner-5,\n\
.spinner-5::before,\n\
.spinner-5::after {\n\
width: 1em;\n\
height: 5em;\n\
background-color: black;\n\
animation: spinner-5 1.5s infinite ease-in-out;\n\
}\n\
.spinner-5::before,\n\
.spinner-5::after {\n\
content: "";\n\
position: absolute;\n\
top: 0em;\n\
}\n\
.spinner-5::before {\n\
left: -1.5em;\n\
animation-delay: -.3s;\n\
}\n\
.spinner-5 {\n\
animation-delay: -.15s;\n\
}\n\
.spinner-5::after {\n\
left: 1.5em;\n\
animation-delay: 0s;\n\
}\n\
@keyframes spinner-5 {\n\
0%, 30%, 100% {\n\
box-shadow: 0em 0em, 0em 0em;\n\
}\n\
15% {\n\
box-shadow: 0em 2em, 0em -2em;\n\
}\n\
}'
},
'spinner-6': {
source:
'.spinner-6 {\n\
font-size: 10px;\n\
width: 2em;\n\
height: 2em;\n\
animation: spinner-6 1s infinite linear;\n\
background-color: transparent;\n\
margin: 2em;\n\
transform: translateZ(0);\n\
}\n\
@keyframes spinner-6 {\n\
0%, 100% {\n\
box-shadow: 0 -2em 0 -.2em, 2em -2em 0 -.3em, 2em 0 0 -.6em, 2em 2em 0 -1em transparent, 0 2em 0 -1em, -2em 2em 0 -1em transparent, -2em 0 0 -.6em, -2em -2em 0 -.3em;\n\
}\n\
12.5% {\n\
box-shadow: 0 -2em 0 -.3em, 2em -2em 0 -.2em, 2em 0 0 -.3em, 2em 2em 0 -.6em, 0 2em 0 -1em transparent, -2em 2em 0 -1em, -2em 0 0 -1em transparent, -2em -2em 0 -.6em;\n\
}\n\
25% {\n\
box-shadow: 0 -2em 0 -.6em, 2em -2em 0 -.3em, 2em 0 0 -.2em, 2em 2em 0 -.3em, 0 2em 0 -.6em, -2em 2em 0 -1em transparent, -2em 0 0 -1em, -2em -2em 0 -1em transparent;\n\
}\n\
37.5% {\n\
box-shadow: 0 -2em 0 -1em transparent, 2em -2em 0 -.6em, 2em 0 0 -.3em, 2em 2em 0 -.2em, 0 2em 0 -.3em, -2em 2em 0 -.6em, -2em 0 0 -1em transparent, -2em -2em 0 -1em;\n\
}\n\
50% {\n\
box-shadow: 0 -2em 0 -1em, 2em -2em 0 -1em transparent, 2em 0 0 -.6em, 2em 2em 0 -.3em, 0 2em 0 -.2em, -2em 2em 0 -.3em, -2em 0 0 -.6em, -2em -2em 0 -1em transparent;\n\
}\n\
62.5% {\n\
box-shadow: 0 -2em 0 -1em transparent, 2em -2em 0 -1em, 2em 0 0 -1em transparent, 2em 2em 0 -.6em, 0 2em 0 -.3em, -2em 2em 0 -.2em, -2em 0 0 -.3em, -2em -2em 0 -.6em;\n\
}\n\
75% {\n\
box-shadow: 0 -2em 0 -.6em, 2em -2em 0 -1em transparent, 2em 0 0 -1em, 2em 2em 0 -1em transparent, 0 2em 0 -.6em, -2em 2em 0 -.3em, -2em 0 0 -.2em, -2em -2em 0 -.3em;\n\
}\n\
87.5% {\n\
box-shadow: 0 -2em 0 -.3em, 2em -2em 0 -.6em, 2em 0 0 -1em transparent, 2em 2em 0 -1em, 0 2em 0 -1em transparent, -2em 2em 0 -.6em, -2em 0 0 -.3em, -2em -2em 0 -.2em;\n\
}\n\
}'
},
'spinner-7': {
source:
'.spinner-7 {\n\
border-radius: 50%;\n\
background-color: transparent;\n\
font-size: 10px;\n\
width: 1.5em;\n\
height: 1.5em;\n\
margin: 3em;\n\
animation: spinner-7 1s infinite steps(8);\n\
}\n\
@keyframes spinner-7 {\n\
0%, 100% {\n\
box-shadow: 0 -2.8em #000, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
12.5% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #000, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
25% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #000, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
37.5% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #000, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
50% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #000, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
62.5% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #000, -2.8em 0 #DDD, -2em -2em #DDD;\n\
}\n\
75% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #000, -2em -2em #DDD;\n\
}\n\
87.5% {\n\
box-shadow: 0 -2.8em #DDD, 2em -2em #DDD, 2.8em 0 #DDD, 2em 2em #DDD, 0 2.8em #DDD, -2em 2em #DDD, -2.8em 0 #DDD, -2em -2em #000;\n\
}\n\
}'
},
'spinner-8': {
source:
'.spinner-8 {\n\
font-size: 10px;\n\
width: 6em;\n\
height: 6em;\n\
border: 3em solid #CCC;\n\
box-sizing: border-box;\n\
animation: spinner-8 .8s infinite steps(4);\n\
}\n\
@keyframes spinner-8 {\n\
0%, 100% {\n\
border-color: #CCC;\n\
border-top-color: #000;\n\
}\n\
25% {\n\
border-color: #CCC;\n\
border-right-color: #000;\n\
}\n\
50% {\n\
border-color: #CCC;\n\
border-bottom-color: #000;\n\
}\n\
75% {\n\
border-color: #CCC;\n\
border-left-color: #000;\n\
}\n\
}'
},
'spinner-9': {
source:
'.spinner-9 {\n\
font-size: 8px;\n\
border: 1.5em solid #DDD;\n\
border-radius: 50%;\n\
border-top-color: black;\n\
box-sizing: border-box;\n\
width: 10em;\n\
height: 10em;\n\
animation: spinner-9 1s infinite linear;\n\
}\n\
@keyframes spinner-9 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-10': {
source:
'.spinner-10 {\n\
font-size: 8px;\n\
border: 1.5em solid black;\n\
border-radius: 50%;\n\
border-top-color: transparent;\n\
box-sizing: border-box;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
animation: spinner-10 1s infinite linear;\n\
}\n\
.spinner-10::before,\n\
.spinner-10::after {\n\
content: "";\n\
position: absolute;\n\
border-radius: 50%;\n\
background-color: black;\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 2.75em;\n\
top: -1.5em;\n\
transform-origin: .75em 5em;\n\
}\n\
.spinner-10::before {\n\
transform: rotate(-45deg);\n\
}\n\
.spinner-10::after {\n\
transform: rotate(45deg);\n\
}\n\
@keyframes spinner-10 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-11': {
source:
'.spinner-11 {\n\
font-size: 8px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
border: 1.5em solid transparent;\n\
border-top-color: black;\n\
border-bottom-color: black;\n\
border-radius: 50%;\n\
box-sizing: border-box;\n\
animation: spinner-11 1.5s infinite linear;\n\
}\n\
.spinner-11::before,\n\
.spinner-11::after {\n\
content: "";\n\
position: absolute;\n\
box-sizing: border-box;\n\
border: 1em solid transparent;\n\
}\n\
.spinner-11::before {\n\
left: 5.4em;\n\
top: -.6em;\n\
border-right-color: black;\n\
border-bottom-color: black;\n\
}\n\
.spinner-11::after {\n\
left: -.3em;\n\
top: 5.6em;\n\
border-top-color: black;\n\
border-left-color: black;\n\
}\n\
@keyframes spinner-11 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-12': {
source:
'.spinner-12 {\n\
font-size: 8px;\n\
position: relative;\n\
animation: spinner-12 2s infinite linear;\n\
}\n\
.spinner-12,\n\
.spinner-12::before,\n\
.spinner-12::after {\n\
width: 10em;\n\
height: 10em;\n\
border: 1.5em solid transparent;\n\
border-top-color: black;\n\
border-radius: 50%;\n\
box-sizing: border-box;\n\
}\n\
.spinner-12::before,\n\
.spinner-12::after {\n\
content: "";\n\
position: absolute;\n\
left: -1.5em;\n\
top: -1.5em;\n\
}\n\
.spinner-12::before {\n\
transform: rotate(120deg);\n\
}\n\
.spinner-12::after {\n\
transform: rotate(240deg);\n\
}\n\
@keyframes spinner-12 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-13': {
source:
'.spinner-13 {\n\
font-size: 8px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
}\n\
.spinner-13::before,\n\
.spinner-13::after {\n\
content: "";\n\
position: absolute;\n\
background-color: black;\n\
left: 0em;\n\
top: 0em;\n\
animation-duration: 1s;\n\
animation-timing-function: linear;\n\
animation-iteration-count: infinite;\n\
}\n\
.spinner-13::before {\n\
animation-name: spinner-13-before;\n\
}\n\
.spinner-13::after {\n\
animation-name: spinner-13-after;\n\
}\n\
@keyframes spinner-13-before {\n\
0%, 100% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
24% {\n\
width: 10em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
25% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 8.5em;\n\
top: 0em;\n\
}\n\
49% {\n\
width: 1.5em;\n\
height: 10em;\n\
left: 8.5em;\n\
top: 0em;\n\
}\n\
50% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 8.5em;\n\
top: 8.5em;\n\
}\n\
74% {\n\
width: 10em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 8.5em;\n\
}\n\
75% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 8.5em;\n\
}\n\
99% {\n\
width: 1.5em;\n\
height: 10em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
}\n\
@keyframes spinner-13-after {\n\
0%, 100% {\n\
width: 1.5em;\n\
height: 10em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
24% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
25% {\n\
width: 10em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 0em;\n\
}\n\
49% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 8.5em;\n\
top: 0em;\n\
}\n\
50% {\n\
width: 1.5em;\n\
height: 10em;\n\
left: 8.5em;\n\
top: 0em;\n\
}\n\
74% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 8.5em;\n\
top: 8.5em;\n\
}\n\
75% {\n\
width: 10em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 8.5em;\n\
}\n\
99% {\n\
width: 1.5em;\n\
height: 1.5em;\n\
left: 0em;\n\
top: 8.5em;\n\
}\n\
}'
},
'spinner-14': {
source:
'.spinner-14 {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 10em;\n\
background-color: white;\n\
border-radius: 50%;\n\
border-top: 1.4em solid transparent;\n\
border-left: 1em solid black;\n\
box-sizing: border-box;\n\
position: relative;\n\
animation: spinner-14 .7s infinite linear;\n\
}\n\
.spinner-14::before {\n\
content: "";\n\
width: 1.2em;\n\
height: 1.2em;\n\
position: absolute;\n\
left: 3.5em;\n\
top: -1.2em;\n\
background-color: black;\n\
border-radius: 50%;\n\
transform-origin: .5em 5.4em;\n\
transform: rotate(-45deg);\n\
}\n\
@keyframes spinner-14 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-15': {
source:
'.spinner-15 {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 10em;\n\
border-radius: 50%;\n\
border: 1.2em solid #DDD;\n\
box-sizing: border-box;\n\
position: relative;\n\
}\n\
.spinner-15::before {\n\
content: "";\n\
width: 1em;\n\
height: 1em;\n\
position: absolute;\n\
left: .2em;\n\
top: .2em;\n\
transform-origin: 3.6em 3.6em;\n\
background-color: black;\n\
border-radius: 50%;\n\
animation: spinner-15 1.5s infinite linear;\n\
}\n\
@keyframes spinner-15 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-16': {
source:
'.spinner-16 {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
}\n\
.spinner-16::before,\n\
.spinner-16::after {\n\
content: "";\n\
background-color: transparent;\n\
position: absolute;\n\
left: 4.5em;\n\
top: 4.5em;\n\
width: 1em;\n\
height: 1em;\n\
border-radius: 50%;\n\
box-sizing: border-box;\n\
border: .2em solid black;\n\
animation: spinner-16 1s infinite cubic-bezier(0, 0.25, 0.75, 1);\n\
}\n\
.spinner-16::before {\n\
animation-delay: 0s;\n\
}\n\
.spinner-16::after {\n\
animation-delay: -.5s;\n\
}\n\
@keyframes spinner-16 {\n\
0% {\n\
opacity: 1;\n\
transform: scale(0);\n\
}\n\
100% {\n\
opacity: 0;\n\
transform: scale(10);\n\
}\n\
}'
},
'spinner-17': {
source:
'.spinner-17 {\n\
font-size: 8px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
animation: spinner-17 .9s infinite linear;\n\
}\n\
.spinner-17::before,\n\
.spinner-17::after {\n\
content: "";\n\
position: absolute;\n\
}\n\
.spinner-17::before {\n\
width: 10em;\n\
height: 5em;\n\
left: 0;\n\
top: 0;\n\
background: linear-gradient(45deg, transparent, transparent 1.5em, transparent 1.5em, black 6em, black 6em, black);\n\
border-radius: 5em 5em 0 0;\n\
}\n\
.spinner-17::after {\n\
animation-delay: -.5s;\n\
width: 7em;\n\
height: 3.5em;\n\
left: 1.5em;\n\
top: 1.5em;\n\
background-color: white;\n\
border-radius: 3.5em 3.5em 0 0;\n\
}\n\
@keyframes spinner-17 {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-heart': {
title: 'heart beating',
source:
'.spinner-heart {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
background-color: transparent;\n\
animation: spinner-heart .5s infinite cubic-bezier(1, 0, 1, 1);\n\
}\n\
.spinner-heart::before,\n\
.spinner-heart::after {\n\
content: "";\n\
position: absolute;\n\
background-color: black;\n\
}\n\
.spinner-heart::before {\n\
left: 2.5em;\n\
top: 0;\n\
width: 5em;\n\
height: 7.5em;\n\
border-radius: 2.5em 2.5em 0 0;\n\
}\n\
.spinner-heart::after {\n\
left: 0;\n\
top: 2.5em;\n\
width: 7.5em;\n\
height: 5em;\n\
border-radius: 2.5em 0 0 2.5em;\n\
}\n\
@keyframes spinner-heart {\n\
0% {\n\
transform: scale(0.7) rotate(45deg);\n\
opacity: 1;\n\
}\n\
100% {\n\
transform: scale(1) rotate(45deg);\n\
opacity: 0.7;\n\
}\n\
}'
},
'spinner-hydrogen': {
title: 'hydrogen atom',
source:
'.spinner-hydrogen {\n\
font-size: 10px;\n\
width: 2em;\n\
height: 2em;\n\
margin: 2.5em;\n\
border-radius: 50%;\n\
box-shadow: 0 3em 0 -.5em;\n\
background-color: black;\n\
animation: spinner-hydrogen 1.5s infinite linear;\n\
}\n\
.spinner-hydrogen::before {\n\
content: "";\n\
width: 6em;\n\
height: 6em;\n\
position: absolute;\n\
left: -2.1em;\n\
top: -2.1em;\n\
border: .1em solid black;\n\
border-radius: 50%;\n\
}\n\
@keyframes spinner-hydrogen {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-yin-yang': {
title: 'yin-yang',
source:
'.spinner-yin-yang {\n\
font-size: 10px;\n\
width: 11em;\n\
height: 11em;\n\
border: .5em solid black;\n\
border-radius: 50%;\n\
background: linear-gradient(to bottom, black, black 50%, white 50%);\n\
position: relative;\n\
box-sizing: border-box;\n\
animation: spinner-yin-yang 2s infinite linear;\n\
}\n\
.spinner-yin-yang::before,\n\
.spinner-yin-yang::after {\n\
content: "";\n\
position: absolute;\n\
width: 1em;\n\
height: 1em;\n\
border-radius: 50%;\n\
}\n\
.spinner-yin-yang::before {\n\
left: 2em;\n\
top: 4.5em;\n\
background-color: #000;\n\
box-shadow: 0 0 0 2em #FFF;\n\
}\n\
.spinner-yin-yang::after {\n\
left: 7em;\n\
top: 4.5em;\n\
background-color: #FFF;\n\
box-shadow: 0 0 0 2em #000;\n\
}\n\
@keyframes spinner-yin-yang {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-clock': {
title: 'clock',
source:
'.spinner-clock {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 10em;\n\
position: relative;\n\
border: .4em solid black;\n\
border-radius: 50%;\n\
box-sizing: border-box;\n\
}\n\
.spinner-clock::before,\n\
.spinner-clock::after {\n\
content: "";\n\
width: .4em;\n\
display: inline-block;\n\
background-color: black;\n\
position: absolute;\n\
left: 4.4em;\n\
border-radius: .4em;\n\
}\n\
.spinner-clock::before {\n\
height: 4em;\n\
transform-origin: .2em 3.8em;\n\
top: .8em;\n\
animation: spinner-clock 1s infinite linear;\n\
}\n\
.spinner-clock::after {\n\
height: 3em;\n\
transform-origin: .2em 2.8em;\n\
top: 1.8em;\n\
animation: spinner-clock 12s infinite linear;\n\
}\n\
@keyframes spinner-clock {\n\
0% {\n\
transform: rotate(0deg);\n\
}\n\
100% {\n\
transform: rotate(360deg);\n\
}\n\
}'
},
'spinner-counter': {
title: 'counter',
source:
'.spinner-counter {\n\
font-size: 20px;\n\
width: 2.2em;\n\
height: 2em;\n\
position: relative;\n\
overflow: hidden;\n\
line-height: 1;\n\
}\n\
.spinner-counter::before,\n\
.spinner-counter::after {\n\
animation: spinner-counter 1s infinite steps(10);\n\
content: "0123456789";\n\
width: .5em;\n\
height: 10em;\n\
top: 0em;\n\
font-size: 2em;\n\
font-family: monospace;\n\
font-weight: bold;\n\
word-wrap: break-word;\n\
text-overflow: clip;\n\
position: absolute;\n\
}\n\
.spinner-counter::before {\n\
left: 0em;\n\
animation-duration: 10s;\n\
}\n\
.spinner-counter::after {\n\
left: .5em;\n\
animation-duration: 1s;\n\
}\n\
@keyframes spinner-counter {\n\
0% {\n\
transform: translateY(0);\n\
}\n\
100% {\n\
transform: translateY(-10em);\n\
}\n\
}'
},
'spinner-zebra': {
source:
'.spinner-zebra {\n\
font-size: 10px;\n\
width: 10em;\n\
height: 4em;\n\
border: .5em solid black;\n\
box-sizing: border-box;\n\
overflow: hidden;\n\
}\n\
.spinner-zebra::before {\n\
content: "";\n\
background: repeating-linear-gradient(45deg, black, black 1em, white 1em, white 2em);\n\
position: relative;\n\
display: inline-block;\n\
width: 10em;\n\
animation: spinner-zebra .5s infinite linear;\n\
}\n\
@keyframes spinner-zebra {\n\
0% {\n\
height: 3em;\n\
}\n\
100% {\n\
height: 5.8em;\n\
}\n\
}'
},
'spinner-collisions': {
source:
'.spinner-collisions {\n\
font-size: 5px;\n\
width: 18em;\n\
height: 14em;\n\
border: 2em solid black;\n\
border-left-color: black;\n\
box-sizing: border-box;\n\
position: relative;\n\
}\n\
.spinner-collisions::before {\n\
content: "";\n\
width: 2em;\n\
height: 2em;\n\
position: absolute;\n\
background-color: black;\n\
border-radius: 50%;\n\
animation: spinner-collisions-x 1.5s infinite linear, spinner-collisions-y 3.5s infinite linear;\n\
}\n\
@keyframes spinner-collisions-x {\n\
0%, 100% {\n\
left: 0em;\n\
}\n\
50% {\n\
left: 12em;\n\
}\n\
}\n\
@keyframes spinner-collisions-y {\n\
0%, 100% {\n\
top: 4em;\n\
}\n\
25% {\n\
top: 8em;\n\
}\n\
75% {\n\
top: 0em;\n\
}\n\
}'
},
'spinner-glider': {
title: 'glider from <a href="https://en.wikipedia.org/wiki/Conway\'s_Game_of_Life" target="_blank">Life</a>',
source:
'.spinner-glider {\n\
font-size: 8px;\n\
width: 11em;\n\
height: 11em;\n\
border: .5em solid black;\n\
background-color: white;\n\
position: relative;\n\
overflow: hidden;\n\
}\n\
.spinner-glider::before,\n\
.spinner-glider::after {\n\
content: "";\n\
position: absolute;\n\
animation-duration: .4s;\n\
animation-timing-function: linear;\n\
animation-iteration-count: infinite;\n\
}\n\
.spinner-glider::before {\n\
width: 2em;\n\
height: 2em;\n\
background-color: transparent;\n\
animation-name: spinner-glider-shift, spinner-glider-cells;\n\
}\n\
.spinner-glider::after {\n\
width: 14em;\n\
height: 14em;\n\
background: repeating-linear-gradient(0deg, #666, #666 1px, transparent 1px, transparent 2em), repeating-linear-gradient(90deg, #666, #666 1px, transparent 1px, transparent 2em);\n\
animation-name: spinner-glider-shift;\n\
}\n\
@keyframes spinner-glider-shift {\n\
0% {\n\
left: -1em;\n\
top: 0em;\n\
}\n\
100% {\n\
left: -3em;\n\
top: -2em;\n\
}\n\
}\n\
@keyframes spinner-glider-cells {\n\
0%, 24% {\n\
box-shadow: 6em 2em, 4em 4em transparent, 6em 4em transparent, 8em 4em, 4em 6em, 6em 6em, 8em 6em, 10em 6em transparent, 6em 8em transparent, 8em 8em transparent, 10em 8em transparent;\n\
}\n\
25%, 49% {\n\
box-shadow: 6em 2em transparent, 4em 4em, 6em 4em transparent, 8em 4em, 4em 6em transparent, 6em 6em, 8em 6em, 10em 6em transparent, 6em 8em, 8em 8em transparent, 10em 8em transparent;\n\
}\n\
50%, 74% {\n\
box-shadow: 6em 2em transparent, 4em 4em transparent, 6em 4em transparent, 8em 4em, 4em 6em, 6em 6em transparent, 8em 6em, 10em 6em transparent, 6em 8em, 8em 8em, 10em 8em transparent;\n\
}\n\
75%, 99% {\n\
box-shadow: 6em 2em transparent, 4em 4em transparent, 6em 4em, 8em 4em transparent, 4em 6em transparent, 6em 6em transparent, 8em 6em, 10em 6em, 6em 8em, 8em 8em, 10em 8em transparent;\n\
}\n\
100% {\n\
box-shadow: 6em 2em transparent, 4em 4em transparent, 6em 4em transparent, 8em 4em, 4em 6em transparent, 6em 6em transparent, 8em 6em transparent, 10em 6em, 6em 8em, 8em 8em, 10em 8em;\n\
}\n\
}'
},
'spinner-wi-fi': {
title: 'wi-fi signal',
source:
'.spinner-wi-fi {\n\
font-size: 10px;\n\
width: 2em;\n\
height: 2em;\n\
border-radius: 50%;\n\
background-color: black;\n\
position: relative;\n\
margin: 3.5em 3.5em 1.5em 1.5em;\n\
animation: spinner-wi-fi-core 1.2s infinite linear;\n\
animation-delay: -.4s;\n\
}\n\
.spinner-wi-fi::before,\n\
.spinner-wi-fi::after {\n\
content: "";\n\
position: absolute;\n\
background-color: transparent;\n\
border: 1em solid transparent;\n\
border-top-color: black;\n\
border-radius: 50%;\n\
animation: spinner-wi-fi-signal 1.2s infinite linear;\n\
}\n\
.spinner-wi-fi::before {\n\
width: 3em;\n\
height: 3em;\n\
left: -1.5em;\n\
top: -1.5em;\n\
animation-delay: -.2s;\n\
}\n\
.spinner-wi-fi::after {\n\
width: 6em;\n\
height: 6em;\n\
left: -3em;\n\
top: -3em;\n\
animation-delay: 0s;\n\
}\n\
@keyframes spinner-wi-fi-core {\n\
0%, 30%, 100% {\n\
background-color: black;\n\
}\n\
15% {\n\
background-color: #888;\n\
}\n\
}\n\
@keyframes spinner-wi-fi-signal {\n\
0%, 30%, 100% {\n\
border-top-color: black;\n\
transform: scale(1) rotate(45deg);\n\
}\n\
15% {\n\
border-top-color: #888;\n\
transform: scale(1.1) rotate(45deg);\n\
}\n\
}'
}
};
window.onload = function() {
var content = document.querySelector('.content'),
overlay = document.querySelector('.overlay'),
source = document.querySelector('.source');
content.innerHTML += [
{ name: 'spinner-1', show: true },
{ name: 'spinner-2', show: true },
{ name: 'spinner-3', show: true },
{ name: 'spinner-4', show: true },
{ name: 'spinner-5', show: true },
{ name: 'spinner-6', show: true },
{ name: 'spinner-7', show: true },
{ name: 'spinner-8', show: true },
{ name: 'spinner-9', show: true },
{ name: 'spinner-10', show: false },
{ name: 'spinner-11', show: true },
{ name: 'spinner-12', show: true },
{ name: 'spinner-17', show: true },
{ name: 'spinner-13', show: true },
{ name: 'spinner-14', show: true },
{ name: 'spinner-15', show: false },
{ name: 'spinner-16', show: true },
{ name: 'spinner-wi-fi', show: true },
{ name: 'spinner-heart', show: true },
{ name: 'spinner-hydrogen', show: true },
{ name: 'spinner-yin-yang', show: true },
{ name: 'spinner-clock', show: true },
{ name: 'spinner-counter', show: true },
{ name: 'spinner-zebra', show: true },
{ name: 'spinner-collisions', show: true },
{ name: 'spinner-glider', show: true }
].filter(function(n) {
return n.show;
}).map(function(n) {
var data = spinners[n.name] || {},
html = '<div class="' + n.name + '"></div>';
if (data.title) {
html += '<div class="spinner-title">' + data.title + '</div>';
}
if (data.source) {
html += '<div class="spinner-source"></div>';
}
return '<div class="spinner-container" data-spinner="' + n.name + '">' + html + '</div>';
}).join('');
content.onclick = function(e) {
if (e.target.classList.contains('spinner-source')) {
var spinnerName = e.target.parentNode.getAttribute('data-spinner'),
spinnerData = spinners[spinnerName];
overlay.classList.remove('hidden');
source.classList.remove('hidden');
source.querySelector('.source-html').innerText = document.querySelector('.' + spinnerName).outerHTML;
source.querySelector('.source-css').innerHTML = '<pre>' + spinnerData.source + '</pre>';
source.querySelector('.source-css').scrollLeft = 0;
source.querySelector('.source-css').scrollTop = 0;
}
};
overlay.onclick = function() {
overlay.classList.add('hidden');
source.classList.add('hidden');
};
overlay.onwheel = source.querySelector('.source-html').onwheel = function(e) {
e.preventDefault();
};
source.onwheel = function(e) {
var css = source.querySelector('.source-css');
if ((css.scrollHeight - css.scrollTop === css.clientHeight && e.deltaY > 0) ||
(css.scrollTop == 0 && e.deltaY < 0)) {
e.preventDefault();
}
};
};
<file_sep>/morse/main.js
window.onload = function() {
var
textElem = document.getElementById('text'),
frequencyElem = document.getElementById('frequency'),
speedWPMElem = document.getElementById('speedWPM'),
speedTimeUnitElem = document.getElementById('speedTimeUnit'),
playElem = document.getElementById('play'),
downloadElem = document.getElementById('download');
if (!Morse.hasOwnProperty('play')) {
playElem.disabled = true;
playElem.setAttribute('title', 'AudioContext is not supported');
}
textElem.value = 'Morse code';
frequencyElem.value = Morse.frequency;
speedWPMElem.value = Morse.WPM;
speedTimeUnitElem.value = Morse.timeUnit;
frequencyElem.onchange = function() {
Morse.frequency = this.value;
document.getElementById('frequencyValue').innerHTML = this.value;
};
frequencyElem.onchange();
speedWPMElem.onchange = function(e) {
Morse.WPM = this.value;
document.getElementById('speedWPMValue').innerHTML = this.value;
if (Morse.timeUnit != speedTimeUnitElem.value && e) {
speedTimeUnitElem.value = Morse.timeUnit;
speedTimeUnitElem.onchange();
}
};
speedWPMElem.onchange();
speedTimeUnitElem.onchange = function(e) {
Morse.timeUnit = this.value;
document.getElementById('speedTimeUnitValue').innerHTML = this.value;
if (Morse.WPM != speedWPMElem.value && e) {
speedWPMElem.value = Morse.WPM;
speedWPMElem.onchange();
}
};
speedTimeUnitElem.onchange();
textElem.oninput = function() {
document.getElementById('value').value = Morse.encode(textElem.value);
};
textElem.oninput();
playElem.onclick = function() {
if (Morse.isPlaying()) {
Morse.stop();
} else {
Morse.play(textElem.value);
}
};
downloadElem.onclick = function() {
Morse.download({
message: textElem.value
});
}
document.addEventListener('morse-signal-on', function() {
playElem.innerHTML = 'Stop';
});
document.addEventListener('morse-signal-off', function() {
playElem.innerHTML = 'Play';
});
};
| 84bd9f341c182844b33c6b182f29067b61a91a48 | ["JavaScript"] | 2 | JavaScript | 0xD34F/0xD34F.github.io | adf28f74659a465caf3597c81f0508c79bf1d4cb | 216411c9d56ba4158b856676299025d82e80dab0 | refs/heads/master |
<file_sep># Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import communication_pb2 as communication__pb2
class CommunicatorStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PushModel = channel.unary_unary(
'/gossip.Communicator/PushModel',
request_serializer=communication__pb2.Model.SerializeToString,
response_deserializer=communication__pb2.Reply.FromString,
)
class CommunicatorServicer(object):
"""Missing associated documentation comment in .proto file"""
def PushModel(self, request, context):
"""Call RPC Server's SendModel to send the model to RPC Server
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CommunicatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'PushModel': grpc.unary_unary_rpc_method_handler(
servicer.PushModel,
request_deserializer=communication__pb2.Model.FromString,
response_serializer=communication__pb2.Reply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gossip.Communicator', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Communicator(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def PushModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/gossip.Communicator/PushModel',
communication__pb2.Model.SerializeToString,
communication__pb2.Reply.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
<file_sep>#!/bin/bash
# Compile for python
python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. communication.proto<file_sep>import argparse
import os
import random
import time
import torch
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torchsummary import summary
import numpy as np
from CifarCNN import CifarCNN
from GossipAggregator import GossipAggregator
from communication.rpc_server import server_from_peers_file
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything()
def main():
"""
Main entry point to the system
"""
print("Cuda:", torch.cuda.is_available())
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
print("Using device", dev)
device = torch.device(dev)
# Handle parsing command line arguments
parser = argparse.ArgumentParser(description="")
parser.add_argument('--id', default="test", type=str)
parser.add_argument('--print_freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('--batch_size', type=int, default=128, help="Training data batch size")
parser.add_argument('--data_dir', type=str, default="./data", help="Data directory")
parser.add_argument("--gossip", type=bool, default=False, help="Gossip mode")
parser.add_argument("--indices", type=str, default=None, help="Indices file")
parser.add_argument("--peers", type=str, default="peers.txt", help="Peers file")
args = parser.parse_args()
# Create GRPC API
api = None
if args.gossip:
peers = args.peers
api = server_from_peers_file(peers)
# Load the training and test data
batch_size_train = args.batch_size
batch_size_test = args.batch_size
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_set = torchvision.datasets.CIFAR10(
root=args.data_dir, train=True, download=True, transform=transform_train)
if args.indices is not None:
indices = torch.load(args.indices)
else:
indices = list(range(len(train_set)))
dataset = torch.utils.data.Subset(train_set, indices)
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size_train, shuffle=True, num_workers=1, pin_memory=True)
test_set = torchvision.datasets.CIFAR10(
root=args.data_dir, train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
test_set, batch_size=batch_size_test, shuffle=False, num_workers=0, pin_memory=True)
run_dir = os.path.join("./", "runs", args.id)
writer = SummaryWriter(log_dir=run_dir)
# The training process
model = CifarCNN(device)
    # torchsummary defaults to device="cuda"; pass the actual device so this also works without a GPU.
    summary(model, (3, 32, 32), device="cuda" if torch.cuda.is_available() else "cpu")
gossip = None
if args.gossip:
gossip = GossipAggregator(data_points=len(indices), server_api=api)
optimizer = optim.AdamW(model.parameters(), 0.001, betas=(0.9, 0.999), weight_decay=1e-2)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150], gamma=0.1)
for epoch in range(200):
print("Training epoch:", epoch)
model.train_epoch(train_loader, args, epoch, optimizer, writer)
print("Evaluating epoch:", epoch)
model.eval_epoch(test_loader, args, writer)
lr_scheduler.step()
if args.gossip:
# Handle the gossip learning updates
flattened = model.flatten()
print("Pushing updates:", epoch)
gossip.push_model(flattened)
print("Receiving updates:", epoch)
flattened = gossip.receive_updates(flattened)
model.unflatten(flattened)
print("Evaluating post receive:", epoch)
model.eval_epoch(test_loader, args, writer)
if __name__ == '__main__':
main()
<file_sep>import torch
import torch.nn as nn
import time
import numpy as np
from helper import AverageMeter
def unflatten_block(block, index, weights, device):
"""
Unflatten the given block into the model at specified index and transfer to the device
"""
block_state_dict = block.state_dict()
for key, value in block_state_dict.items():
param = value.cpu().detach().numpy()
size = param.shape
param = param.flatten()
num_elements = len(param)
weight = weights[index:index + num_elements]
index += num_elements
np_arr = np.array(weight).reshape(size)
block_state_dict[key] = torch.tensor(np_arr).to(device)
block.load_state_dict(block_state_dict)
return index
class CifarCNN(torch.nn.Module):
def __init__(self, device):
"""
Initialize the deep learning model on the given device
"""
super(CifarCNN, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
nn.Dropout(0.2),
nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.MaxPool2d(2),
nn.Dropout(0.3),
nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2),
nn.Dropout(0.4),
nn.Flatten(),
nn.Linear(2048, 10),
nn.LogSoftmax(dim=1)
).to(device)
self.device = device
def forward(self, X):
"""
Calculate loss using the model
"""
loss = self.layer(X)
return loss
def train_epoch(self, loader, args, epoch, optimizer, writer):
"""
Train for a single epoch
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
criterion = torch.nn.NLLLoss()
self.train()
end = time.time()
step = epoch * len(loader)
for i, (x_batch, y_batch) in enumerate(loader):
data_time.update(time.time() - end)
x_batch = x_batch.to(self.device)
y_batch = y_batch.to(self.device)
prediction = self.forward(x_batch)
loss = criterion(prediction, y_batch)
losses.update(loss.item(), args.batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i == len(loader) - 1:
writer.add_scalar('train/loss', losses.val, step + i)
print(
'Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(loader) - 1, batch_time=batch_time,
data_time=data_time, loss=losses), flush=True)
def eval_epoch(self, loader, args, writer):
"""
Test the model using the given data loader
"""
batch_time = AverageMeter()
losses = AverageMeter()
criterion = nn.NLLLoss()
# switch to evaluate mode
self.eval()
        step = 0  # evaluation logging always restarts from step 0
total = 0
correct = 0
with torch.no_grad():
end = time.time()
for i, (x_batch, y_batch) in enumerate(loader):
x_batch = x_batch.to(self.device)
y_batch = y_batch.to(self.device)
prediction = self.forward(x_batch)
loss = criterion(prediction, y_batch)
_, predicted = torch.max(prediction.data, 1)
total += y_batch.size(0)
correct += (predicted == y_batch).sum().item()
# measure accuracy and record loss
losses.update(loss.item(), args.batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i == len(loader) - 1:
writer.add_scalar('eval/loss', losses.val, step + i)
print(
'Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(i, len(loader) - 1, batch_time=batch_time, loss=losses), flush=True)
print('Accuracy of the network on the test images: {accuracy:.4f}%'.format(accuracy=100 * correct / total))
def flatten(self):
"""
Flatten the model into a linear array on CPU
"""
all_params = np.array([])
for key, value in self.layer.state_dict().items():
param = value.cpu().detach().numpy().flatten()
all_params = np.append(all_params, param)
return all_params
def unflatten(self, weights):
"""
Load the weights from a linear array on CPU to the actual model on the device
:param weights:
:return:
"""
index = 0
index = unflatten_block(self.layer, index, weights, self.device)
<file_sep>#!/usr/bin/env bash
export PYTHONUNBUFFERED=1
python main.py --id train --batch_size 128 --print_freq 100 --gossip=true --indices indices_"$1".pt &>../out200-gossip-"$1".txt
<file_sep>import abc
import io
import numpy as np
from communication.rpc_server import ServerAPI
class GossipAggregator:
def __init__(self, data_points, server_api: ServerAPI):
self.alpha = float(data_points) / 10000
self.client = server_api
def push_model(self, model):
"""
Updates the alpha and sends the model to a peer. Will restore the original alpha
if the transmission of update fails for some reason.
"""
# Update alpha
print("Alpha:", self.alpha, "->", self.alpha / 2)
self.alpha /= 2
# Compress to byte array
file = io.BytesIO()
np.savez_compressed(file, model=model, alpha=self.alpha)
data = file.getbuffer()
# Send
res = self.client.push_model(data.tobytes())
if not res:
self.alpha *= 2
print("Failed transmission, restoring alpha to", self.alpha)
def receive_updates(self, model):
"""
Processes all received updates
"""
for elem in self.client.get_updates():
# Write the bytes into memory for numpy to load from.
file = io.BytesIO()
file.write(elem)
file.seek(0)
content = np.load(file)
model2, alpha2 = content['model'], content['alpha']
total = self.alpha + alpha2
print("Alpha:", self.alpha, "->", total)
model = (self.alpha * model + alpha2 * model2) / total
self.alpha = total
return model
<file_sep># Fully Decentralized Federated Learning with Gossip Aggregation
## Requirements
- Python 3.6+
- The following Python libraries (a pip install sketch follows this list):
- torch 1.5.0+
- torchvision
- tensorboardX
- torchsummary
- grpcio
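
These libraries can be installed with pip (plus `grpcio-tools`, which is only needed if you want to regenerate the protobuf stubs via `communication/compile.sh`). A minimal sketch, assuming the PyPI package names match the imports used in this repository:

```bash
# Hypothetical install line; pin versions as needed beyond the stated minimum.
pip install "torch>=1.5.0" torchvision tensorboardX torchsummary grpcio grpcio-tools
```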

## Usage

Run classical (single-node) training in the background:

```bash
PYTHONUNBUFFERED=1 nohup python main.py --id train --batch_size 128 --print_freq 100 &>../out200-classical.txt &
```

Run gossip-aggregated training on each node, passing that node's data partition id (`<data-id>` selects `indices_<data-id>.pt`). Gossip mode also needs a `peers.txt` file, sketched after this command:

```bash
nohup ./run-gossip.sh <data-id>
```
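
Gossip peers are read from the peers file (`--peers`, default `peers.txt`): one `host:port` entry per line; the node's own address is filtered out automatically and every node listens on port 50051 (see `server_from_peers_file` in `communication/rpc_server.py`). A minimal sketch with hypothetical addresses:

```bash
# Hypothetical peers.txt for a three-node cluster; replace the IPs with your own hosts.
cat > peers.txt <<'EOF'
10.0.0.1:50051
10.0.0.2:50051
10.0.0.3:50051
EOF
```
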
## License
Copyright © 2020 <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
<file_sep>import logging
import grpc
from . import communication_pb2_grpc
from . import communication_pb2
class RpcClient:
def __init__(self, server_address='localhost:50051'):
"""
Initializes grpc communication channel and stub
:param server_address:
"""
self.server_address = server_address
self.channel = grpc.insecure_channel(self.server_address)
self.stub = communication_pb2_grpc.CommunicatorStub(self.channel)
def __exit__(self, exc_type, exc_val, exc_tb):
self.channel.close()
def send_model(self, data: bytes):
"""
Sends model to the grpc server we initialized this client with.
:param data:
:return:
"""
try:
response = self.stub.PushModel(communication_pb2.Model(data=data))
return response.result
except grpc.RpcError as e:
logging.error(e)
return False
<file_sep>from concurrent import futures
import logging
import random
import socket
import grpc
from .rpc_client import RpcClient
from . import communication_pb2
from . import communication_pb2_grpc
from queue import Queue, Empty
class Server(communication_pb2_grpc.CommunicatorServicer):
"""
GRPC server.
"""
def __init__(self, update_queue):
self.received_updates = update_queue
def PushModel(self, request, context):
"""
Called by other peers to add model to received update queue. The received request is Model protobuf object
"""
print("Received model")
self.received_updates.put(request.data)
return communication_pb2.Reply(result=True)
class ServerAPI:
def __init__(self, peers, update_queue, server):
self.peer_addrs = peers
self.peers = [RpcClient(addr) for addr in self.peer_addrs]
self.received_updates = update_queue
self.server = server
def push_model(self, data) -> bool:
"""
If a peer exists, send to a peer, else return false
:param data: Model update
:return: Success bool
"""
if len(self.peers) > 0:
peer = random.choice(self.peers)
print("Sending to", peer.server_address)
return peer.send_model(data)
return False
def get_updates(self):
"""
Returns received updates in the queue
:return:
"""
print("Draining updates")
for model in _drain(self.received_updates):
yield model
def serve(port="50051", peers=None):
if peers is None:
peers = []
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
queue = Queue()
server_api = ServerAPI(peers, queue, server)
communication_pb2_grpc.add_CommunicatorServicer_to_server(Server(queue), server)
server.add_insecure_port('[::]:{}'.format(port))
server.start()
return server_api
def get_ip():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
return ip
def read_peers(file_name, me):
with open(file_name) as p:
peers = p.readlines()
peers = [peer.strip() for peer in peers]
peers = [peer for peer in peers if peer != me]
return peers
def server_from_peers_file(file_name):
me = get_ip() + ":50051"
print("Serving on", me)
peers = read_peers(file_name, me)
print("Peers:", ", ".join(peers))
return serve(port="50051", peers=peers)
def _drain(q):
while True:
try:
yield q.get_nowait()
except Empty:
break
| c1e53d6f8d2046f3e60ebd8afb6d2691a80056d1 | ["Markdown", "Python", "Shell"] | 9 | Python | Kausta/federated-learning-gossip-aggregation | e5bdc1a3c05ff68d2ce4aee58d177fe901359c3c | ab822d2b3478b0e509e70f0193f1ed68acf5bb83 | refs/heads/master |
<repo_name>Dkemp04/informatik2-uebung-wwu-gruppe04<file_sep>/trunk/Test2/src/TollerTest.java
/**
*
*/
/**
* @author <NAME>
*
*/
public class TollerTest {
}
<file_sep>/trunk/Aufgabe 13/src/Node.java
class Node<K extends Comparable<K>,D>
{
K key;
D data;
Node<K,D> left;
Node<K,D> right;
Node(K k, D d, Node<K,D> l, Node<K,D> r){
key=k; data=d; left=l; right=r;}
public void insertNode(K k, D data)
{
if (key.compareTo(k)<0)
if (right == null)
right = new Node<K,D>(k, data, null, null);
else right.insertNode(k, data);
else if (left == null)
left = new Node<K,D>(k, data, null, null);
else left.insertNode(k, data);
}
public D findNode(K k) throws Exception
{
if (key.compareTo(k)<0)
if (right == null) throw new Exception(k+" nicht gefunden");
else return right.findNode(k);
else if (k.compareTo(key)<0)
if (left == null) throw new Exception(k+" nicht gefunden");
else return left.findNode(k);
else return data;
}
    // Returns the parent of the largest node in this subtree (assumes right != null).
    private Node<K,D> findMaxPred()
    {
        if (right.right == null) return this;
        else return right.findMaxPred();
    }

    // Deletes the node with key k from this subtree and returns the new subtree root.
    public Node<K,D> deleteNode(K k)
    {
        if (key.compareTo(k)<0)
        {
            if (right != null) right = right.deleteNode(k);
            return this;
        }
        else if (k.compareTo(key)<0)
        {
            if (left != null) left = left.deleteNode(k);
            return this;
        }
        // Found the node to delete: splice in a child if one side is empty.
        if (left == null) return right;
        if (right == null) return left;
        if (left.right == null) {left.right = right; return left;}
        // Otherwise replace this node with the largest node of the left subtree.
        Node<K,D> maxPred = left.findMaxPred();
        Node<K,D> max = maxPred.right;
        maxPred.right = max.left;
        max.left = left;
        max.right = right;
        return max;
    }
}<file_sep>/trunk/Aufgabe 14/src/Median.java
/**
*
*/
/**
* @author <NAME>
*
*/
public class Median
{
}<file_sep>/trunk/Aufgabe 13/src/Queue.java
import java.util.*;

// Simple FIFO queue of tree nodes, backed by an ArrayList (head of the list = front of the queue).
class Queue <K extends Comparable<K>, D>
{
    ArrayList<Node<K,D>> liste = new ArrayList<Node<K,D>> ();

    Queue ()
    {
        liste = new ArrayList<Node<K,D>> ();
    }

    public boolean isEmpty ()
    {
        return liste.isEmpty();
    }

    public void enqueue (Node<K,D> node)
    {
        liste.add(node);
    }

    public Node<K,D> dequeue ()
    {
        if (liste.isEmpty()) return null;
        return liste.remove(0);
    }
}
| 0b87304e08e5cc217bb2eda7d2515b45f3dcdaf7 | ["Java"] | 4 | Java | Dkemp04/informatik2-uebung-wwu-gruppe04 | fae7373cad24c9c9de2281a76dbda9e771bc6364 | 3ea16ca18fde2d8c3e26247df6909dd0873b1702 | refs/heads/master |
<file_sep><?php
use Mockery as m;
use Payum\Core\Gateway;
use Payum\Core\Bridge\Spl\ArrayObject;
use Recca0120\LaravelPayum\CoreGatewayFactory;
class CoreGatewayFactoryTest extends PHPUnit_Framework_TestCase
{
public function tearDown()
{
m::close();
}
public function test_build_action()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$actionInterface = m::mock('Payum\Core\Action\ActionInterface');
$app = m::mock('Illuminate\Contracts\Foundation\Application, ArrayAccess');
$defaultConfig = new ArrayObject();
$coreGatewayFactory = m::mock(new CoreGatewayFactory($app, []));
$gateway = new Gateway();
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$coreGatewayFactory
->shouldDeferMissing()
->shouldAllowMockingProtectedMethods();
$app
->shouldReceive('offsetGet')->with('payum.action.foo1')->once()->andReturn($actionInterface);
$defaultConfig->defaults([
'payum.prepend_actions' => [],
'payum.action.foo1' => 'payum.action.foo1',
]);
$coreGatewayFactory->buildActions($gateway, $defaultConfig);
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$this->assertAttributeSame([$actionInterface], 'actions', $gateway);
}
public function test_build_api()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$api = m::mock('stdClass');
$app = m::mock('Illuminate\Contracts\Foundation\Application, ArrayAccess');
$defaultConfig = new ArrayObject();
$coreGatewayFactory = m::mock(new CoreGatewayFactory($app, []));
$gateway = new Gateway();
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$coreGatewayFactory
->shouldDeferMissing()
->shouldAllowMockingProtectedMethods();
$app
->shouldReceive('offsetGet')->with('payum.api.foo1')->once()->andReturn($api);
$defaultConfig->defaults([
'payum.prepend_apis' => [],
'payum.api.foo1' => 'payum.api.foo1',
]);
$coreGatewayFactory->buildApis($gateway, $defaultConfig);
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$this->assertAttributeSame([$api], 'apis', $gateway);
}
public function test_build_extensions()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$extensionInterface = m::mock('Payum\Core\Extension\ExtensionInterface');
$app = m::mock('Illuminate\Contracts\Foundation\Application, ArrayAccess');
$defaultConfig = new ArrayObject();
$coreGatewayFactory = m::mock(new CoreGatewayFactory($app, []));
$gateway = new Gateway();
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$coreGatewayFactory
->shouldDeferMissing()
->shouldAllowMockingProtectedMethods();
$app->shouldReceive('offsetGet')->with('payum.extension.foo1')->once()->andReturn($extensionInterface);
$defaultConfig->defaults([
'payum.prepend_extensions' => [],
'payum.extension.foo1' => 'payum.extension.foo1',
]);
$coreGatewayFactory->buildExtensions($gateway, $defaultConfig);
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$extensions = $this->readAttribute($gateway, 'extensions');
$this->assertAttributeSame([$extensionInterface], 'extensions', $extensions);
}
}
<file_sep><?php
namespace Recca0120\LaravelPayum;
use Illuminate\Support\Arr;
use Payum\Core\PayumBuilder;
use Payum\Core\Model\ArrayObject;
use Illuminate\Filesystem\Filesystem;
use Payum\Core\GatewayFactoryInterface;
use Payum\Core\Storage\StorageInterface;
use Payum\Core\Model\Token as PayumToken;
use Payum\Core\Storage\FilesystemStorage;
use Payum\Core\Model\Payment as PayumPayment;
use Recca0120\LaravelPayum\Model\GatewayConfig;
use Illuminate\Contracts\Foundation\Application;
use Payum\Core\Registry\StorageRegistryInterface;
use Recca0120\LaravelPayum\Security\TokenFactory;
use Recca0120\LaravelPayum\Storage\EloquentStorage;
use Recca0120\LaravelPayum\Model\Token as EloquentToken;
use Payum\Core\Bridge\Symfony\Security\HttpRequestVerifier;
use Recca0120\LaravelPayum\Model\Payment as EloquentPayment;
class PayumBuilderManager
{
/**
* $payumBuilder.
*
* @var \Payum\Core\PayumBuilder
*/
protected $payumBuilder;
/**
* $filesystem.
*
* @var \Illuminate\Filesystem\Filesystem
*/
protected $filesystem;
/**
* $app.
*
* @var \Illuminate\Contracts\Foundation\Application
*/
protected $app;
/**
* $routeAliasName.
*
* @var string
*/
protected $routeAliasName;
/**
* $tokenStorageType.
*
* @var string
*/
protected $tokenStorageType;
/**
* $gatewayConfigStorageType.
*
* @var string
*/
protected $gatewayConfigStorageType;
/**
* $storagePath.
*
* @var string
*/
protected $storagePath;
/**
* $gatewayConfigs.
*
* @var array
*/
protected $gatewayConfigs;
/**
* __construct.
*
* @method __construct
*
* @param \Payum\Core\PayumBuilder $payumBuilder
* @param \Illuminate\Filesystem\Filesystem $filesystem
* @param \Illuminate\Contracts\Foundation\Application $app
* @param array $config
*/
public function __construct(PayumBuilder $payumBuilder, Filesystem $filesystem, Application $app, $config = [])
{
$this->payumBuilder = $payumBuilder;
$this->filesystem = $filesystem;
$this->app = $app;
$this->tokenStorageType = Arr::get($config, 'storage.token', 'filesystem');
$this->gatewayConfigStorageType = Arr::get($config, 'storage.gatewayConfig', 'filesystem');
$this->routeAliasName = Arr::get($config, 'route.as');
$this->storagePath = Arr::get($config, 'path');
$this->gatewayConfigs = Arr::get($config, 'gatewayConfigs', []);
}
/**
* createTokenFactory.
*
* @method createTokenFactory
*
* @param \Payum\Core\Storage\StorageInterface $tokenStorage
* @param \Payum\Core\Registry\StorageRegistryInterface $registry
*
* @return \Recca0120\LaravelPayum\Security\TokenFactory
*/
public function createTokenFactory(StorageInterface $tokenStorage, StorageRegistryInterface $registry)
{
return $this->app->make(TokenFactory::class, [$tokenStorage, $registry]);
}
/**
* createHttpRequestVerifier.
*
* @method createHttpRequestVerifier
*
* @param \Payum\Core\Storage\StorageInterface $tokenStorage
*
* @return \Payum\Core\Bridge\Symfony\Security\HttpRequestVerifier
*/
public function createHttpRequestVerifier(StorageInterface $tokenStorage)
{
return $this->app->make(HttpRequestVerifier::class, [$tokenStorage]);
}
/**
* createCoreGatewayFactoryConfig.
*
* @method createCoreGatewayFactoryConfig
*
* @param array $defaultConfig
*
* @return \Recca0120\LaravelPayum\CoreGatewayFactory
*/
public function createCoreGatewayFactoryConfig($defaultConfig)
{
return $this->app->make(CoreGatewayFactory::class, [
'defaultConfig' => $defaultConfig,
]);
}
/**
* createGenericTokenFactoryPaths.
*
* @method createGenericTokenFactoryPaths
*
* @return array
*/
public function createGenericTokenFactoryPaths()
{
return [
'authorize' => $this->routeAliasName.'authorize',
'capture' => $this->routeAliasName.'capture',
'notify' => $this->routeAliasName.'notify',
'payout' => $this->routeAliasName.'payout',
'refund' => $this->routeAliasName.'refund',
'cancel' => $this->routeAliasName.'cancel',
'sync' => $this->routeAliasName.'sync',
'done' => $this->routeAliasName.'done',
];
}
/**
* createEloquentStorage.
*
* @method createEloquentStorage
*
* @param string $modelClass
*
* @return \Recca0120\LaravelPayum\Storage\EloquentStorage
*/
public function createEloquentStorage($modelClass)
{
return $this->app->make(EloquentStorage::class, [
$modelClass,
$this->app,
]);
}
/**
* createFilesystemStorage.
*
* @method createFilesystemStorage
*
* @param string $modelClass
* @param string $idProperty
*
* @return \Payum\Core\Storage\FilesystemStorage
*/
public function createFilesystemStorage($modelClass, $idProperty = 'payum_id')
{
return $this->app->make(FilesystemStorage::class, [
$this->storagePath,
$modelClass,
$idProperty,
]);
}
/**
* setTokenFactory.
*
* @method setTokenFactory
*
* @return self
*/
protected function setTokenFactory()
{
$this->payumBuilder->setTokenFactory([$this, 'createTokenFactory']);
return $this;
}
/**
* setHttpRequestVerifier.
*
* @method setHttpRequestVerifier
*
* @return self
*/
protected function setHttpRequestVerifier()
{
$this->payumBuilder->setHttpRequestVerifier([$this, 'createHttpRequestVerifier']);
return $this;
}
/**
* setCoreGatewayFactory.
*
* @method setCoreGatewayFactory
*
* @return self
*/
protected function setCoreGatewayFactory()
{
$this->payumBuilder->setCoreGatewayFactory([$this, 'createCoreGatewayFactoryConfig']);
return $this;
}
/**
* setCoreGatewayFactoryConfig.
*
* @method setCoreGatewayFactoryConfig
*
* @return self
*/
protected function setCoreGatewayFactoryConfig()
{
$this->payumBuilder->setCoreGatewayFactoryConfig([
'payum.action.obtain_credit_card' => 'payum.action.obtain_credit_card',
'payum.action.render_template' => 'payum.action.render_template',
'payum.extension.update_payment_status' => 'payum.extension.update_payment_status',
]);
return $this;
}
/**
* setGenericTokenFactoryPaths.
*
* @method setGenericTokenFactoryPaths
*
* @return self
*/
protected function setGenericTokenFactoryPaths()
{
$this->payumBuilder->setGenericTokenFactoryPaths($this->createGenericTokenFactoryPaths());
return $this;
}
/**
* setStorage.
*
* @method setStorage
*
* @return self
*/
protected function setStorage()
{
return ($this->tokenStorageType === 'eloquent') ?
$this->setEloquentStorage() : $this->setFilesystemStorage();
}
/**
* setEloquentStorage.
*
* @method setEloquentStorage
*
* @return self
*/
protected function setEloquentStorage()
{
$this->payumBuilder
->setTokenStorage($this->createEloquentStorage(EloquentToken::class))
->addStorage(EloquentPayment::class, $this->createEloquentStorage(EloquentPayment::class));
return $this;
}
/**
* setFilesystemStorage.
*
* @method setFilesystemStorage
*
* @return self
*/
protected function setFilesystemStorage()
{
if ($this->filesystem->isDirectory($this->storagePath) === false) {
$this->filesystem->makeDirectory($this->storagePath, 0777, true);
}
$this->payumBuilder
->setTokenStorage($this->createFilesystemStorage(PayumToken::class, 'hash'))
->addStorage(PayumPayment::class, $this->createFilesystemStorage(PayumPayment::class, 'number'))
->addStorage(ArrayObject::class, $this->createFilesystemStorage(ArrayObject::class));
return $this;
}
/**
* setGatewayConfigStorage.
*
* @method setGatewayConfigStorage
*
* @return self
*/
protected function setGatewayConfigStorage()
{
if ($this->gatewayConfigStorageType === 'eloquent') {
$storage = $this->createEloquentStorage(GatewayConfig::class);
$this->payumBuilder->setGatewayConfigStorage($storage);
foreach ($storage->findBy([]) as $gatewayConfig) {
$gatewayName = $gatewayConfig->getGatewayName();
$factoryName = $gatewayConfig->getFactoryName();
$this->gatewayConfigs[$gatewayName] = array_merge(
Arr::get($this->gatewayConfigs, $gatewayName, []),
['factory' => $factoryName],
$gatewayConfig->getConfig()
);
}
}
return $this;
}
/**
* getGatewayConfigs.
*
* @method getGatewayConfigs
*
* @return array
*/
protected function getGatewayConfigs()
{
return $this->gatewayConfigs;
}
/**
* setGatewayConfig.
*
* @method setGatewayConfig
*
* @return self
*/
protected function setGatewayConfig()
{
foreach ($this->gatewayConfigs as $gatewayName => $gatewayConfig) {
$factoryName = Arr::get($gatewayConfig, 'factory');
if (empty($factoryName) === false && class_exists($factoryName) === true) {
$this->payumBuilder
->addGatewayFactory($gatewayName, function ($gatewayConfig, GatewayFactoryInterface $coreGatewayFactory) use ($factoryName) {
return $this->app->make($factoryName, [$gatewayConfig, $coreGatewayFactory]);
});
}
$gatewayConfig['factory'] = $gatewayName;
$this->payumBuilder->addGateway($gatewayName, $gatewayConfig);
}
return $this;
}
/**
* getBuilder.
*
* @method getBuilder
*
* @return \Payum\Core\PayumBuilder
*/
public function getBuilder()
{
$this
->setTokenFactory()
->setHttpRequestVerifier()
->setCoreGatewayFactory()
->setCoreGatewayFactoryConfig()
->setGenericTokenFactoryPaths()
->setStorage()
->setGatewayConfigStorage()
->setGatewayConfig();
return $this->payumBuilder;
}
}
<file_sep><?php
use Mockery as m;
use Payum\Core\Bridge\Spl\ArrayObject;
use Recca0120\LaravelPayum\PayumBuilderManager;
class PayumBuilderManagerTest extends PHPUnit_Framework_TestCase
{
public function tearDown()
{
m::close();
}
public function test_create_token_factory()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
$storageRegistryInterface = m::mock('Payum\Core\Registry\StorageRegistryInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app->shouldReceive('make')->with('Recca0120\LaravelPayum\Security\TokenFactory', [
$storageInterface,
$storageRegistryInterface,
])->once();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->createTokenFactory($storageInterface, $storageRegistryInterface);
}
public function test_create_http_request_verifier()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app->shouldReceive('make')->with('Payum\Core\Bridge\Symfony\Security\HttpRequestVerifier', [
$storageInterface,
])->once();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->createHttpRequestVerifier($storageInterface);
}
public function test_create_core_gateway_factory_config()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app->shouldReceive('make')->with('Recca0120\LaravelPayum\CoreGatewayFactory', [
'defaultConfig' => [],
])->once();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->createCoreGatewayFactoryConfig([]);
}
public function test_create_generic_token_factory_paths()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'route.as' => 'payum.',
];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$this->assertSame([
'authorize' => $config['route.as'].'authorize',
'capture' => $config['route.as'].'capture',
'notify' => $config['route.as'].'notify',
'payout' => $config['route.as'].'payout',
'refund' => $config['route.as'].'refund',
'cancel' => $config['route.as'].'cancel',
'sync' => $config['route.as'].'sync',
'done' => $config['route.as'].'done',
], $manager->createGenericTokenFactoryPaths());
}
public function test_create_eloquent_storage()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'route.as' => 'payum.',
];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$modelClass = 'fooClass';
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app->shouldReceive('make')->with('Recca0120\LaravelPayum\Storage\EloquentStorage', [
$modelClass,
$app,
])->once();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->createEloquentStorage($modelClass);
}
public function test_create_filesystem_storage()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'path' => 'fooPath',
];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$modelClass = 'fooClass';
$idProperty = 'fooId';
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
$modelClass,
$idProperty,
])->once();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->createFilesystemStorage($modelClass, $idProperty);
}
public function test_set_eloquent_storage()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'storage.token' => 'eloquent',
];
$manager = m::mock(new PayumBuilderManager($payumBuilder, $filesystem, $app, $config))
->shouldAllowMockingProtectedMethods();
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app
->shouldReceive('make')->with('Recca0120\LaravelPayum\Storage\EloquentStorage', [
'Recca0120\LaravelPayum\Model\Token',
$app,
])->once()->andReturn($storageInterface)
->shouldReceive('make')->with('Recca0120\LaravelPayum\Storage\EloquentStorage', [
'Recca0120\LaravelPayum\Model\Payment',
$app,
])->once()->andReturn($storageInterface);
$payumBuilder
->shouldReceive('setTokenStorage')->with($storageInterface)->once()->andReturnSelf()
->shouldReceive('addStorage')->with('Recca0120\LaravelPayum\Model\Payment', $storageInterface)->once()->andReturnSelf();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->setStorage();
}
public function test_set_filesystem_storage()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'path' => 'fooPath',
'storage.token' => 'filesystem',
];
$manager = m::mock(new PayumBuilderManager($payumBuilder, $filesystem, $app, $config))
->shouldAllowMockingProtectedMethods();
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\Token',
'hash',
])->once()->andReturn($storageInterface)
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\Payment',
'number',
])->once()->andReturn($storageInterface)
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\ArrayObject',
'payum_id',
])->once()->andReturn($storageInterface);
$filesystem
->shouldReceive('isDirectory')->with($config['path'])->andReturn(false)
->shouldReceive('makeDirectory')->with($config['path'], 0777, true)->andReturn(true);
$payumBuilder
->shouldReceive('setTokenStorage')->with($storageInterface)->andReturnSelf()
->shouldReceive('addStorage')->with('Payum\Core\Model\Payment', $storageInterface)->andReturnSelf()
->shouldReceive('addStorage')->with('Payum\Core\Model\ArrayObject', $storageInterface)->andReturnSelf();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->setStorage();
}
public function test_set_gateway_config_storage()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'storage.gatewayConfig' => 'eloquent',
];
$manager = m::mock(new PayumBuilderManager($payumBuilder, $filesystem, $app, $config))
->shouldAllowMockingProtectedMethods();
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
        $eloquentGatewayConfig = m::mock('Recca0120\LaravelPayum\Model\GatewayConfig');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app
->shouldReceive('make')->with('Recca0120\LaravelPayum\Storage\EloquentStorage', [
'Recca0120\LaravelPayum\Model\GatewayConfig',
$app,
])->once()->andReturn($storageInterface);
$payumBuilder
->shouldReceive('setGatewayConfigStorage')->with($storageInterface)->once();
$eloquentGatewayConfig
->shouldReceive('getGatewayName')->once()->andReturn('fooGateway')
->shouldReceive('getFactoryName')->once()->andReturn('fooFactoryName')
->shouldReceive('getConfig')->once()->andReturn([
'foo' => 'bar',
]);
$storageInterface
->shouldReceive('findBy')->with([])->andReturn([$eloquentGatewayConfig]);
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->setGatewayConfigStorage();
$this->assertSame([
'fooGateway' => [
'factory' => 'fooFactoryName',
'foo' => 'bar',
],
], $manager->getGatewayConfigs());
}
public function test_set_gateway_config()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'gatewayConfigs' => [
'gatewayName' => [
'factory' => 'factory',
'username' => 'username',
'password' => '<PASSWORD>',
],
'gatewayName2' => [
'factory' => 'stdClass',
'username' => 'username',
'password' => '<PASSWORD>',
],
],
];
$manager = m::mock(new PayumBuilderManager($payumBuilder, $filesystem, $app, $config))
->shouldAllowMockingProtectedMethods();
$defaultConfig = new ArrayObject([
'payum.template.obtain_credit_card' => 'foo.payum.template.obtain_credit_card',
]);
$gatewayFactory = m::mock('Payum\Core\GatewayFactoryInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$gatewayConfigs = $config['gatewayConfigs'];
foreach ($gatewayConfigs as $gatewayName => $gatewayConfig) {
if (class_exists($gatewayConfig['factory']) === true) {
$payumBuilder->shouldReceive('addGatewayFactory')->with($gatewayName, m::type('Closure'))->andReturnUsing(function ($name, $closure) use ($defaultConfig, $gatewayFactory) {
return $closure($defaultConfig, $gatewayFactory);
});
$app->shouldReceive('make')->with($gatewayConfig['factory'], m::any());
}
$gatewayConfig['factory'] = $gatewayName;
$payumBuilder->shouldReceive('addGateway')->with($gatewayName, $gatewayConfig);
}
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$manager->setGatewayConfig();
}
public function test_get_builder()
{
/*
|------------------------------------------------------------
| Set
|------------------------------------------------------------
*/
$payumBuilder = m::mock('Payum\Core\PayumBuilder');
$filesystem = m::mock('Illuminate\Filesystem\Filesystem');
$app = m::mock('Illuminate\Contracts\Foundation\Application');
$config = [
'path' => 'fooPath',
];
$manager = new PayumBuilderManager($payumBuilder, $filesystem, $app, $config);
$storageInterface = m::mock('Payum\Core\Storage\StorageInterface');
/*
|------------------------------------------------------------
| Expectation
|------------------------------------------------------------
*/
$app
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\Token',
'hash',
])->once()->andReturn($storageInterface)
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\Payment',
'number',
])->once()->andReturn($storageInterface)
->shouldReceive('make')->with('Payum\Core\Storage\FilesystemStorage', [
$config['path'],
'Payum\Core\Model\ArrayObject',
'payum_id',
])->once()->andReturn($storageInterface);
$filesystem
->shouldReceive('isDirectory')->with($config['path'])->andReturn(false)
->shouldReceive('makeDirectory')->with($config['path'], 0777, true)->andReturn(true);
$payumBuilder
->shouldReceive('setTokenFactory')->with([$manager, 'createTokenFactory'])->once()
->shouldReceive('setHttpRequestVerifier')->with([$manager, 'createHttpRequestVerifier'])->once()
->shouldReceive('setCoreGatewayFactory')->with([$manager, 'createCoreGatewayFactoryConfig'])->once()
->shouldReceive('setCoreGatewayFactoryConfig')->with([
'payum.action.obtain_credit_card' => 'payum.action.obtain_credit_card',
'payum.action.render_template' => 'payum.action.render_template',
'payum.extension.update_payment_status' => 'payum.extension.update_payment_status',
])->once()
->shouldReceive('setGenericTokenFactoryPaths')
->shouldReceive('setTokenStorage')->with($storageInterface)->andReturnSelf()
->shouldReceive('addStorage')->with('Payum\Core\Model\Payment', $storageInterface)->andReturnSelf()
->shouldReceive('addStorage')->with('Payum\Core\Model\ArrayObject', $storageInterface)->andReturnSelf();
/*
|------------------------------------------------------------
| Assertion
|------------------------------------------------------------
*/
$this->assertSame($payumBuilder, $manager->getBuilder());
}
}
<file_sep><?php
namespace Recca0120\LaravelPayum;
use Payum\Core\Gateway;
use Payum\Core\Bridge\Spl\ArrayObject;
use Illuminate\Contracts\Foundation\Application;
use Payum\Core\CoreGatewayFactory as PayumCoreGatewayFactory;
class CoreGatewayFactory extends PayumCoreGatewayFactory
{
/**
* $app.
*
* @var \Illuminate\Contracts\Foundation\Application
*/
protected $app;
/**
* __construct.
*
* @method __construct
*
* @param \Illuminate\Contracts\Foundation\Application $app
* @param array $defaultConfig
*/
public function __construct(Application $app, array $defaultConfig = [])
{
parent::__construct($defaultConfig);
$this->app = $app;
}
/**
* buildActions.
*
* @method buildActions
*
* @param \Payum\Core\Gateway $gateway
* @param \Payum\Core\Bridge\Spl\ArrayObject $config
*/
protected function buildActions(Gateway $gateway, ArrayObject $config)
{
foreach ($config as $name => $value) {
if (0 === strpos($name, 'payum.action') && false == is_object($config[$name])) {
$config[$name] = $this->app[$config[$name]];
}
}
parent::buildActions($gateway, $config);
}
/**
* buildApis.
*
* @method buildApis
*
* @param \Payum\Core\Gateway $gateway
* @param \Payum\Core\Bridge\Spl\ArrayObject $config
*/
protected function buildApis(Gateway $gateway, ArrayObject $config)
{
foreach ($config as $name => $value) {
if (0 === strpos($name, 'payum.api') && false == is_object($config[$name])) {
$config[$name] = $this->app[$config[$name]];
}
}
parent::buildApis($gateway, $config);
}
/**
* buildExtensions.
*
* @method buildExtensions
*
* @param \Payum\Core\Gateway $gateway
* @param \Payum\Core\Bridge\Spl\ArrayObject $config
*/
protected function buildExtensions(Gateway $gateway, ArrayObject $config)
{
foreach ($config as $name => $value) {
if (0 === strpos($name, 'payum.extension') && false == is_object($config[$name])) {
$config[$name] = $this->app[$config[$name]];
}
}
parent::buildExtensions($gateway, $config);
}
}
| e0808b6753e5652af9630f6ffe4dd76542014598 | ["PHP"] | 4 | PHP | manishnakar/laravel-payum | d09b942dbd875b1abcf42f4b796931c5fdaa6a2c | 0a68f35d4e6f41192ad4ae9de25dbb03fff99554 | refs/heads/main |
<file_sep>/* eslint-env mocha */
const expect = require('chai').expect
const nock = require('nock')
const path = require('path')
const CsvlintValidator = require('../lib/csvlint/validator')
const exampleUrlHost = 'http://example.com'
const exampleUrlPath = '/example.csv'
const emptyUrlPath = '/empty.csv'
function stubUrl (urlPath = exampleUrlPath, headers = { }) {
const files = {
[exampleUrlPath]: 'valid.csv',
[emptyUrlPath]: 'empty.csv',
'/crlf.csv': 'windows-line-endings.csv'
}
const file = files[urlPath]
if (!headers['Content-Type']) {
headers['Content-Type'] = 'text/csv'
}
nock(exampleUrlHost)
.get(urlPath)
.replyWithFile(
200,
path.join(__dirname, 'fixtures', file),
headers
)
}
function loadFromUrl (urlPath = exampleUrlPath, headers = { }) {
stubUrl(urlPath, headers)
return `${exampleUrlHost}${urlPath}`
}
describe('Csvlint::Validator', () => {
before(() => {
nock(exampleUrlHost).get('/.well-known/csvm').reply(404)
nock(exampleUrlHost).get('/example.csv-metadata.json').reply(404)
nock(exampleUrlHost).get('/csv-metadata.json').reply(404)
})
it('should validate from a URL', async () => {
const validator = await CsvlintValidator(loadFromUrl())
expect(validator.isValid).to.eql(true)
expect(validator.data.length).to.eql(3)
expect(validator.expectedColumns_).to.eql(3)
expect(validator.colCounts_.length).to.eql(3)
})
it('should validate from a file path', async () => {
const validator = await CsvlintValidator(
path.join(__dirname, 'fixtures', 'valid.csv')
)
expect(validator.isValid).to.eql(true)
expect(validator.data.length).to.eql(3)
expect(validator.expectedColumns_).to.eql(3)
expect(validator.colCounts_.length).to.eql(3)
})
it('should validate from a file path including whitespace', async () => {
const validator = await CsvlintValidator(
path.join(__dirname, 'fixtures', 'white space in filename.csv')
)
expect(validator.isValid).to.eql(true)
})
xdescribe('multi line CSV validation with included schema', () => {
})
xdescribe('single line row validation with included schema', () => {
})
describe('validation with multiple lines: ', () => {
// TODO multiple lines permits testing of warnings
// TODO need more assertions in each test IE @formats
// TODO the phrasing of col_counts if only consulting specs might be confusing
// TODO ^-> col_counts and data.length should be equivalent, but only data is populated outside of if row.nil?
// TODO ^- -> and its less the size of col_counts than the homogeneity of its contents which is important
it('validates a well formed CSV', async () => {
// when invoking parse contents
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"3","2","1"'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
expect(validator.expectedColumns_).to.eql(3)
expect(validator.colCounts_.length).to.eql(4)
expect(validator.data.length).to.eql(4)
})
it('parses malformed CSV and catches unclosed quote', async () => {
// doesn"t build warnings because check_consistency isn"t invoked
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"3","2","1'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
expect(validator.errors[0].type).to.eql('unclosedQuote')
})
it('parses malformed CSV and catches stray quote', async () => {
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"3","2","1""'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
// expect(validator.errors[0].type).to.eql('stray_quote')
// can't exactly replicate csvlint.rb behaviour here -
// error is detected, but error code is different
expect(validator.errors[0].type).to.eql('unclosedQuote')
expect(validator.errors.length).to.eql(1)
})
it('parses malformed CSV and catches whitespace and edge case', async () => {
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"3","2","1" '
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
// expect(validator.errors[0].type).to.eql('whitespace')
// can't exactly replicate csvlint.rb behaviour here -
// error is detected, but error code is different
expect(validator.errors.length).to.eql(2)
const errorTypes = validator.errors.map(e => e.type)
expect(errorTypes).to.contain('trailingCharacters')
expect(errorTypes).to.contain('unclosedQuote')
})
it('handles line breaks within a cell', async () => {
const data = '"a","b","c"\r\n"d","e","this is\r\nvalid"\r\n"a","b","c"'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
})
it('handles multiple line breaks within a cell', async () => {
const data = '"a","b","c"\r\n"d","this is\r\n valid","as is this\r\n too"'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
})
})
describe('csv dialect', () => {
it('should provide sensible defaults for CSV parsing', async () => {
const validator = await CsvlintValidator(loadFromUrl())
const opts = validator.csvOptions
expect(opts).to.include({
colSep: ',',
rowSep: 'auto',
quoteChar: '"',
skipBlanks: false
})
})
it('should map CSV DDF to correct values', async () => {
const validator = await CsvlintValidator(loadFromUrl())
const opts = validator.dialectToCsvOptions({
lineTerminator: '\n',
delimiter: '\t',
quoteChar: '"'
})
expect(opts).to.include({
colSep: '\t',
rowSep: '\n',
quoteChar: '"',
skipBlanks: false
})
})
  it('`validate` accepts input in streaming fashion', async () => {
// warnings are built when validate is used to call all three methods
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"3","2","1"'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
expect(validator.expectedColumns_).to.eql(3)
expect(validator.colCounts_.length).to.eql(4)
expect(validator.data.length).to.eql(4)
expect(validator.infoMessages.length).to.eql(1)
})
  it('`validate` parses malformed CSV, populates errors, warnings & info_msgs, invokes finish()', async () => {
const data = '"Foo","Bar","Baz"\r\n"1","2","3"\r\n"1","2","3"\r\n"1","two","3"\r\n"3","2", "1"'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
expect(validator.expectedColumns_).to.eql(3)
expect(validator.colCounts_.length).to.eql(4)
expect(validator.data.length).to.eql(5)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.errors.length).to.eql(1)
expect(validator.errors[0].type).to.eql('invalidOpeningQuote') // .rb has whitespace
expect(validator.warnings.length).to.eql(1)
expect(validator.warnings[0].type).to.eql('inconsistentValues')
})
it('`validate` passes a valid csv', async () => {
const filename = path.join(__dirname, 'fixtures', 'valid_many_rows.csv')
const validator = await CsvlintValidator(filename)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.infoMessages[0].type).to.eql('assumed_header')
expect(validator.infoMessages[0].category).to.eql('structure')
})
})
describe('with a single row', async () => {
it('validates correctly', async () => {
const data = '"a","b","c"\r\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.isValid).to.eql(true)
})
it('checks for non rfc line breaks', async () => {
const data = '"a","b","c"\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.infoMessages[0].type).to.eql('nonrfc_line_breaks')
})
it('checks for blank rows', async () => {
const data = '"","",\r\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
expect(validator.errors[0].type).to.eql('blank_rows')
})
it('returns the content of the string with the error', async () => {
const data = '"","",""\r\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.errors[0].content).to.eql(data)
})
it('should presume a header unless told otherwise', async () => {
const data = '1,2,3\r\n'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.infoMessages[0].type).to.eql('assumed_header')
expect(validator.infoMessages[0].category).to.eql('structure')
})
it('should evaluate the row as "row 2" when stipulated', async () => {
const data = '1,2,3\r\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(0)
})
})
describe('it returns the correct error from ERROR_MATCHES (mechanism has changed for JS - not translating exceptions)', async () => {
it('checks for unclosed quotes', async () => {
const data = '"a,"b","c"\n'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
// expect(validator.errors[0].type).to.eql('unclosed_quote')
expect(validator.errors[0].type).to.eql('trailingCharacters')
})
// TODO stray quotes is not covered in any spec in this library
it('checks for stray quotes', async () => {
const data = '"a","b","c" "\r\n'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
// expect(validator.errors[0].type).to.eql('stray_quote')
// can't exactly replicate csvlint.rb behaviour here -
// error is detected, but error code is different
expect(validator.errors[0].type).to.eql('trailingCharacters')
})
it('checks for whitespace', async () => {
const data = ' "a","b","c"\r\n'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
// expect(validator.errors[0].type).to.eql('whitespace')
expect(validator.errors[0].type).to.eql('invalidOpeningQuote')
})
it('returns line break errors if incorrectly specified', async () => {
// TODO the logic for catching this error message is very esoteric
const data = '"a","b","c"\n'
const validator = await CsvlintValidator(data, { lineTerminator: '\r\n' })
expect(validator.isValid).to.eql(false)
expect(validator.errors.length).to.eql(1)
expect(validator.errors[0].type).to.eql('line_breaks')
})
})
describe('when validating headers', () => {
it("should warn if column names aren't unique", async () => {
const data = 'minimum,minimum\n'
const validator = await CsvlintValidator(data)
expect(validator.warnings.length).to.eql(1)
expect(validator.warnings[0].type).to.eql('duplicate_column_name')
expect(validator.warnings[0].category).to.eql('schema')
})
it('should warn if column names are blank', async () => {
const data = 'minimum,\n'
const validator = await CsvlintValidator(data)
expect(validator.warnings.length).to.eql(1)
expect(validator.warnings[0].type).to.eql('empty_column_name')
expect(validator.warnings[0].category).to.eql('schema')
})
it('should include info message about missing header when we have assumed a header', async () => {
const data = '1,2,3\r\n'
const validator = await CsvlintValidator(data)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.infoMessages[0].type).to.eql('assumed_header')
expect(validator.infoMessages[0].category).to.eql('structure')
})
it('should not include info message about missing header when we are told about the header', async () => {
const data = '1,2,3\r\n'
const validator = await CsvlintValidator(data, { header: false })
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(0)
})
it("should not be an error if we have assumed a header, there is no dialect and content-type doesn't declare header, as we assume header=present", async () => {
const validator = await CsvlintValidator(loadFromUrl())
expect(validator.isValid).to.eql(true)
})
it('should be valid if we have a dialect and the data is from the web', async () => {
// header defaults to true in csv dialect, so this is valid
let validator = await CsvlintValidator(loadFromUrl(), {})
expect(validator.isValid).to.eql(true)
validator = await CsvlintValidator(loadFromUrl(), { header: true })
expect(validator.isValid).to.eql(true)
validator = await CsvlintValidator(loadFromUrl(), { header: false })
expect(validator.isValid).to.eql(true)
})
})
describe('DO NOW! build formats', () => {
const formats = {
string: 'foo',
numeric: '1',
uri: 'http://www.example.com',
dateTime_iso8601: '2013-01-01T13:00:00Z',
date_db: '2013-01-01',
dateTime_hms: '13:00:00'
}
for (const [type, content] of Object.entries(formats)) {
it(`should return the format of ${type} correctly`, async () => {
const row = [content]
const validator = await CsvlintValidator(loadFromUrl(emptyUrlPath))
validator.buildFormats(row)
const format = validator.formats_[0]
expect(Object.keys(format)[0]).to.eql(type)
})
}
it('treats floats and ints the same', async () => {
const row = ['12', '3.1476']
const validator = await CsvlintValidator(loadFromUrl(emptyUrlPath))
validator.buildFormats(row)
const formats = validator.formats_
expect(Object.keys(formats[0])[0]).to.eql('numeric')
expect(Object.keys(formats[1])[0]).to.eql('numeric')
})
it('should ignore blank arrays', async () => {
const row = []
const validator = await CsvlintValidator(loadFromUrl(emptyUrlPath))
validator.buildFormats(row)
const formats = validator.formats_
expect(formats).to.eql([])
})
it('should work correctly for single columns', async () => {
const rows = [
['foo'],
['bar'],
['baz']
]
const validator = await CsvlintValidator(loadFromUrl(emptyUrlPath))
for (const row of rows) {
validator.buildFormats(row)
}
const formats = validator.formats_
expect(formats).to.eql([{ string: 3 }])
})
it('should return formats correctly if a row is blank', async () => {
const rows = [
[],
['foo', '1', '$2345']
]
const validator = await CsvlintValidator(loadFromUrl(emptyUrlPath))
for (const row of rows) {
validator.buildFormats(row)
}
const formats = validator.formats_
expect(formats).to.eql([
{ string: 1 },
{ numeric: 1 },
{ string: 1 }
])
})
})
describe('check consistency', async () => {
it('should return a warning if columns have inconsistent values', async () => {
const formats = [
{ string: 3 },
{ string: 2, numeric: 1 },
{ numeric: 3 }
]
const validator = await CsvlintValidator(loadFromUrl())
validator.formats_ = formats
validator.checkConsistency()
const warnings = validator.warnings
.filter(w => w.type === 'inconsistentValues')
expect(warnings.length).to.eql(1)
})
})
describe('when detecting headers', () => {
it('should default to expecting a header', async () => {
const validator = await CsvlintValidator(loadFromUrl())
expect(validator.hasHeader).to.eql(true)
})
it('should look in CSV options to detect header', async () => {
let validator = await CsvlintValidator(loadFromUrl(), { header: true })
expect(validator.hasHeader).to.eql(true)
validator = await CsvlintValidator(loadFromUrl(), { header: false })
expect(validator.hasHeader).to.eql(false)
})
it('should look in content-type for header=absent', async () => {
const validator = await CsvlintValidator(loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/csv; header=absent' }))
expect(validator.hasHeader).to.eql(false)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(0)
})
it('should look in content-type for header=present', async () => {
const validator = await CsvlintValidator(loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/csv; header=present' }))
expect(validator.hasHeader).to.eql(true)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(0)
})
it('assume header present if not specified in content type', async () => {
const validator = await CsvlintValidator(loadFromUrl())
expect(validator.hasHeader).to.eql(true)
expect(validator.isValid).to.eql(true)
expect(validator.infoMessages.length).to.eql(1)
expect(validator.infoMessages[0].type).to.eql('assumed_header')
})
it('give wrong content type error if content type is wrong', async () => {
const validator = await CsvlintValidator(loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/html' }))
expect(validator.hasHeader).to.eql(true)
expect(validator.errors.length).to.eql(1)
expect(validator.errors[0].type).to.eql('wrong_content_type')
})
})
describe('accessing metadata', () => {
// before :all do
    // stub_request(:get, "http://example.com/crlf.csv").to_return(:status => 200, :body => File.read(File.join(File.dirname(__FILE__),"..","features","fixtures","windows-line-endings.csv")))
// stub_request(:get, "http://example.com/crlf.csv-metadata.json").to_return(:status => 404)
it('can get line break symbol', async () => {
const validator = await CsvlintValidator(loadFromUrl('/crlf.csv'))
expect(validator.lineBreaks).to.eql('\r\n')
})
it('should give access to the complete CSV data file', async () => {
const validator = await CsvlintValidator(
loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/csv; header=present' })
)
expect(validator.isValid).to.eql(true)
const data = validator.data
expect(data.length).to.eql(3)
expect(data[0]).to.eql(['Foo', 'Bar', 'Baz'])
expect(data[2]).to.eql(['3', '2', '1'])
})
it('should count the total number of rows read', async () => {
const validator = await CsvlintValidator(
loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/csv; header=present' })
)
expect(validator.rowCount).to.eql(3)
})
for (const state of ['absent', 'present']) {
it(`header=${state} in content-type`, async () => {
const validator = await CsvlintValidator(
loadFromUrl(exampleUrlPath, { 'Content-Type': `text/csv; header=${state}` })
)
expect(validator.hasHeader).to.eql(state === 'present')
})
}
it('should limit number of lines read', async () => {
const validator = await CsvlintValidator(
loadFromUrl(exampleUrlPath, { 'Content-Type': 'text/csv; header=present' }),
{},
null,
{ limitLines: 2 }
)
expect(validator.isValid).to.eql(true)
const data = validator.data
expect(data.length).to.eql(2)
expect(data[0]).to.eql(['Foo', 'Bar', 'Baz'])
})
})
describe('with a lambda', () => {
it('should call a lambda for each line', async () => {
let count = 0
const lambda = row => ++count
await CsvlintValidator(path.join(__dirname, 'fixtures', 'valid.csv'), {}, null, { lambda })
expect(count).to.eql(3)
})
it('reports back the status of each line', async () => {
const results = []
const lambda = (row, currentLine) => results.push(currentLine)
await CsvlintValidator(path.join(__dirname, 'fixtures', 'valid.csv'), {}, null, { lambda })
expect(results.length).to.eql(3)
expect(results[0]).to.eql(1)
expect(results[1]).to.eql(2)
expect(results[2]).to.eql(3)
})
})
/*
// Commented out because there is currently no way to mock redirects with Typhoeus and WebMock - see https://github.com/bblimke/webmock/issues/237
// it("should follow redirects to SSL", () => {
// stub_request(:get, "http://example.com/redirect").to_return(:status => 301, :headers=>{"Location" => "https://example.com/example.csv"})
// stub_request(:get, "https://example.com/example.csv").to_return(:status => 200,
// :headers=>{"Content-Type" => "text/csv; header=present"},
// :body => File.read(File.join(File.dirname(__FILE__),"..","features","fixtures","valid.csv")))
//
// const validator = await CsvlintValidator("http://example.com/redirect")
// expect( validator.isValid ).to.eql(true)
// })
*/
})
<file_sep># CSV Lint
A Node package to support validating CSV files to check their syntax and contents. You can either use this package within your own JavaScript code, or as a standalone command line application.
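A minimal sketch of programmatic use, based on the validator exercised by the test suite below (the `require` path is illustrative; the published entry point may differ):

```js
const CsvlintValidator = require('csvlint/lib/csvlint/validator') // illustrative path

async function lint (source) {
  // source may be a URL, a local file path, or a string of CSV data
  const validator = await CsvlintValidator(source)
  console.log('valid?', validator.isValid)
  for (const error of validator.errors) {
    console.log(error.type, error.category)
  }
  for (const warning of validator.warnings) {
    console.log(warning.type, warning.category)
  }
}

lint('fixtures/valid.csv')
```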
## In Development
This package is in development. It is derived, with thanks, from [csvlint.rb](https://github.com/Data-Liberation-Front/csvlint.rb) developed by my friends at [The ODI](https://theodi.org/).
Initial work will port csvlint.rb to JavaScript, aiming for feature compatibility as full as is reasonable. There may be some cosmetic changes (e.g. method and property names will be adapted from Ruby's snake_case to JavaScript's more common camelCase), but the validation performed will be as similar as I can make it.
Future work will look to provide a [stream.Transform interface](https://nodejs.org/api/stream.html#stream_class_stream_transform) to csvlint.js so that it could be used, for example, as a validating parser in place of [csv-parse](https://www.npmjs.com/package/csv-parse).
*Note:* csvlint.js does not implement its own CSV parsing logic. That would be madness. Like all right-thinking JavaScript programmers, under the covers it uses [csv-parse](https://www.npmjs.com/package/csv-parse).
<file_sep>/* eslint-env mocha */
const expect = require('chai').expect
const nock = require('nock')
const CsvlintField = require('../lib/csvlint/field')
const CsvlintSchema = require('../lib/csvlint/schema')
describe('Csvlint::Schema', () => {
it('should tolerate missing fields', () => {
const schema = CsvlintSchema.fromJsonTable('http://example.org', {})
expect(schema).to.not.eql(null)
expect(schema.fields.length).to.eql(0)
})
it('should tolerate fields with no constraints', () => {
const schema = CsvlintSchema.fromJsonTable('http://example.org', {
fields: [{ name: 'test' }]
})
expect(schema).to.not.eql(null)
expect(schema.fields[0].name).to.eql('test')
expect(schema.fields[0].constraints).to.eql({})
})
it('should validate against the schema', () => {
const field = new CsvlintField('test', { required: true })
const field2 = new CsvlintField('test', { minLength: 3 })
const schema = new CsvlintSchema('http://example.org', [field, field2])
expect(schema.validateRow(['', 'x'])).to.eql(false)
expect(schema.errors.length).to.eql(2)
expect(schema.errors[0].type).to.eql('missing_value')
expect(schema.errors[0].category).to.eql('schema')
expect(schema.errors[0].column).to.eql(1)
expect(schema.validateRow(['abc', '1234'])).to.eql(true)
})
it('should include validations for missing columns', () => {
const minimum = new CsvlintField('test', { minLength: 3 })
const required = new CsvlintField('test2', { required: true })
const minReq = new CsvlintSchema('http://example.org', [minimum, required])
expect(minReq.validateRow(['abc', 'x'])).to.eql(true)
expect(minReq.validateRow(['abc'])).to.eql(false)
expect(minReq.errors.length).to.eql(1)
expect(minReq.errors[0].type).to.eql('missing_value')
const reqMin = new CsvlintSchema('http://example.org', [required, minimum])
expect(reqMin.validateRow(['x', 'abc'])).to.eql(true)
expect(reqMin.validateRow(['abc'])).to.eql(false)
expect(reqMin.errors.length).to.eql(1)
expect(reqMin.errors[0].type).to.eql('min_length')
})
it('should warn if the data has fewer columns', () => {
const minimum = new CsvlintField('test', { minLength: 3 })
const required = new CsvlintField('test2', { maxLength: 5 })
const schema = new CsvlintSchema('http://example.org', [minimum, required])
expect(schema.validateRow(['abc'], 1)).to.eql(true)
expect(schema.warnings.length).to.eql(1)
expect(schema.warnings[0].type).to.eql('missing_column')
expect(schema.warnings[0].category).to.eql('schema')
expect(schema.warnings[0].row).to.eql(1)
expect(schema.warnings[0].column).to.eql(2)
// no ragged row error
expect(schema.errors.length).to.eql(0)
})
it('should warn if the data has additional columns', () => {
const minimum = new CsvlintField('test', { minLength: 3 })
const required = new CsvlintField('test2', { required: true })
const schema = new CsvlintSchema('http://example.org', [minimum, required])
expect(schema.validateRow(['abc', 'x', 'more', 'columns'], 1)).to.eql(true)
expect(schema.warnings.length).to.eql(2)
expect(schema.warnings[0].type).to.eql('extra_column')
expect(schema.warnings[0].category).to.eql('schema')
expect(schema.warnings[0].row).to.eql(1)
expect(schema.warnings[0].column).to.eql(3)
expect(schema.warnings[1].type).to.eql('extra_column')
expect(schema.warnings[1].column).to.eql(4)
// no ragged row error
expect(schema.errors.length).to.eql(0)
})
describe('when validating header', () => {
it('should warn if column names are different to field names', () => {
const minimum = new CsvlintField('minimum', { minLength: 3 })
const required = new CsvlintField('required', { required: true })
const schema = new CsvlintSchema('http://example.org', [minimum, required])
expect(schema.validateHeader(['minimum', 'required'])).to.eql(true)
expect(schema.warnings.length).to.eql(0)
expect(schema.validateHeader(['wrong', 'required'])).to.eql(true)
expect(schema.warnings.length).to.eql(1)
expect(schema.warnings[0].row).to.eql(1)
expect(schema.warnings[0].type).to.eql('malformed_header')
expect(schema.warnings[0].content).to.eql('wrong,required')
expect(schema.warnings[0].column).to.eql(null)
expect(schema.warnings[0].category).to.eql('schema')
expect(schema.warnings[0].constraints).to.have.property('expectedHeader', 'minimum,required')
expect(schema.validateHeader(['minimum', 'Required'])).to.eql(true)
expect(schema.warnings.length).to.eql(1)
})
it('should warn if column count is less than field count', () => {
const minimum = new CsvlintField('minimum', { minLength: 3 })
const required = new CsvlintField('required', { required: true })
const schema = new CsvlintSchema('http://example.org', [minimum, required])
expect(schema.validateHeader(['minimum'])).to.eql(true)
expect(schema.warnings.length).to.eql(1)
expect(schema.warnings[0].row).to.eql(1)
expect(schema.warnings[0].type).to.eql('malformed_header')
expect(schema.warnings[0].content).to.eql('minimum')
expect(schema.warnings[0].column).to.eql(null)
expect(schema.warnings[0].category).to.eql('schema')
expect(schema.warnings[0].constraints).to.have.property('expectedHeader', 'minimum,required')
})
it('should warn if column count is more than field count', () => {
const minimum = new CsvlintField('minimum', { minLength: 3 })
const schema = new CsvlintSchema('http://example.org', [minimum])
expect(schema.validateHeader(['wrong', 'required'])).to.eql(true)
expect(schema.warnings.length).to.eql(1)
expect(schema.warnings[0].row).to.eql(1)
expect(schema.warnings[0].type).to.eql('malformed_header')
expect(schema.warnings[0].content).to.eql('wrong,required')
expect(schema.warnings[0].column).to.eql(null)
expect(schema.warnings[0].category).to.eql('schema')
expect(schema.warnings[0].constraints).to.have.property('expectedHeader', 'minimum')
})
})
describe('when parsing JSON Tables', () => {
const example = `{
"title": "Schema title",
"description": "schema",
"fields": [
{
"name": "ID",
"constraints": { "required": true },
"title": "id",
"description": "house identifier"
},
{
"name": "Price",
"constraints": { "required": true, "minLength": 1 }
},
{
"name": "Postcode",
"constraints": { "required": true, "pattern": "[A-Z]{1,2}[0-9][0-9A-Z]? ?[0-9][A-Z]{2}" }
}
]
}
`
nock('http://example.com')
.get('/example.json')
.reply(200, example)
it('should create a schema from a pre-parsed JSON table', () => {
const json = JSON.parse(example)
const schema = CsvlintSchema.fromJsonTable('http://example.org', json)
expect(schema.uri).to.eql('http://example.org')
expect(schema.title).to.eql('Schema title')
expect(schema.description).to.eql('schema')
expect(schema.fields.length).to.eql(3)
expect(schema.fields[0].name).to.eql('ID')
expect(schema.fields[0].constraints.required).to.eql(true)
expect(schema.fields[0].title).to.eql('id')
expect(schema.fields[0].description).to.eql('house identifier')
expect(schema.fields[2].constraints.pattern).to.eql('[A-Z]{1,2}[0-9][0-9A-Z]? ?[0-9][A-Z]{2}')
})
it('should create a schema from a JSON Table URL', async () => {
const schema = await CsvlintSchema.loadFromUri('http://example.com/example.json')
expect(schema.uri).to.eql('http://example.com/example.json')
expect(schema.fields.length).to.eql(3)
expect(schema.fields[0].name).to.eql('ID')
expect(schema.fields[0].constraints.required).to.eql(true)
})
})
xdescribe('when parsing CSVW metadata', () => {
/*
const example = `
{
"@context": "http://www.w3.org/ns/csvw",
"url": "http://example.com/example1.csv",
"tableSchema": {
"columns": [
{ "name": "Name", "required": true, "datatype": { "base": "string", "format": ".+" } },
{ "name": "Id", "required": true, "datatype": { "base": "string", "minLength": 3 } },
{ "name": "Email", "required": true }
]
}
}
`
*/
// stub_request(:get, "http://example.com/metadata.json").to_return(:status => 200, :body => @example)
it('should create a table group from a CSVW metadata URL', () => {
// const schema = CsvlintSchema.loadFromUri("http://example.com/metadata.json")
// expect(schema.class).to.eql(Csvlint::Csvw::TableGroup)
})
})
})
<file_sep>const ErrorCollector = require('../error-collector')
const CsvwPropertyChecker = require('./property-checker')
const metadataError = require('./metadata-error')
const ErrorMessage = require('../error-message')
const NumberFormat = require('./number-format')
const DateFormat = require('./date-format')
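// Represents a single CSVW column description. Holds the column's own and inherited
// properties plus any annotations, and validates cell values against the declared datatype.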
class Column {
get id() { return this.id_ }
get about_url() { return this.about_url_ }
get datatype() { return this.datatype_ }
get default_value() { return this.default_value_ }
get lang() { return this.lang_ }
get name() { return this.name_ }
get nulls() { return this.nulls_ }
get number() { return this.number_ }
get ordered() { return this.ordered_ }
get property_url() { return this.property_url_ }
get required() { return this.required_ }
get separator() { return this.separator_ }
get source_number() { return this.source_number_ }
get suppress_output() { return this.suppress_output_ }
get text_direction() { return this.text_direction_ }
get default_name() { return this.default_name_ }
get titles() { return this.titles_ }
get value_url() { return this.value_url_ }
get virtual() { return this.virtual_ }
get annotations() { return this.annotations_ }
constructor (
number,
name,
{
id = null,
about_url = null,
datatype = { '@id': 'http://www.w3.org/2001/XMLSchema#string' },
default_value = '',
lang = 'und',
nulls = [''],
ordered = false,
property_url = null,
required = false,
separator = null,
source_number = null,
suppress_output = false,
text_direction = 'inherit',
default_name = null,
titles = {},
value_url = null,
virtual = false,
annotations = {},
warnings = []
} = {}
) {
this.number_ = number
this.name_ = name
this.id_ = id
this.about_url_ = about_url
this.datatype_ = datatype
this.default_value_ = default_value
this.lang_ = lang
this.nulls_ = nulls
this.ordered_ = ordered
this.property_url_ = property_url
this.required_ = required
this.separator_ = separator
this.source_number_ = source_number || number
this.suppress_output_ = suppress_output
this.text_direction_ = text_direction
this.default_name_ = default_name
this.titles_ = titles
this.value_url_ = value_url
this.virtual_ = virtual
this.annotations_ = annotations
this.errors_ = new ErrorCollector()
this.errors_.warnings.push(...warnings)
} // constructor
get warnings () { return this.errors_.warnings }
get errors () { return this.errors_.errors }
static fromJson (number, column_desc, base_url = null, lang = 'und', inherited_properties = {}) {
const annotations = {}
const warnings = []
const column_properties = {}
inherited_properties = Object.assign({}, inherited_properties)
const addWarning = (type, category, content, constraint) => {
warnings.push(new ErrorMessage(type, category, null, null, content, constraint))
} // warning
for (const [property, value] of Object.entries(column_desc)) {
if (property === '@type') {
if (value !== 'Column') {
metadataError(`columns[${number}].@type`, "@type of column is not 'Column'")
}
} else {
const [v, warning, type] = CsvwPropertyChecker(property, value, base_url, lang)
warning.forEach(w => addWarning(w, 'metadata', `${property}: ${value}`))
if (type === 'annotation') annotations[property] = v
else if (type === 'common' || type === 'column') column_properties[property] = v
else if (type === 'inherited') inherited_properties[property] = v
else addWarning('invalid_property', 'metadata', `column: ${property}`, null)
}
} // for ...
return new Column(
number,
column_properties.name,
{
id: column_properties['@id'],
datatype: inherited_properties.datatype || { '@id': 'http://www.w3.org/2001/XMLSchema#string' },
lang: inherited_properties.lang || 'und',
nulls: inherited_properties.null || [''],
default_value: inherited_properties.default || '',
about_url: inherited_properties.aboutUrl,
property_url: inherited_properties.propertyUrl,
value_url: inherited_properties.valueUrl,
required: inherited_properties.required || false,
separator: inherited_properties.separator,
ordered: inherited_properties.ordered || false,
default_name: column_properties.titles && column_properties.titles[lang] ? column_properties.titles[lang][0] : null,
titles: column_properties.titles || null,
suppress_output: column_properties.suppressOutput ? column_properties.suppressOutput : false,
virtual: column_properties.virtual || false,
annotations: annotations,
warnings: warnings
}
)
} // fromJson
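// Validates a single cell against this column's datatype and constraints.
// Returns null when the value is one of the declared null values, a parsed value
// (or { invalid: raw } on failure) otherwise, and an array of such values when a separator is set.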
validate (stringValue = null, row = null) {
stringValue = stringValue || this.default_value_
if (this.nulls.includes(stringValue)) {
this.validateRequired(null, row)
return null
}
const stringValues = this.separator_
? stringValue.split(this.separator_)
: [ stringValue ]
const baseType = this.datatype_["base"]
const idType = this.datatype_["@id"]
const dataTypeParser = DATATYPE_PARSER[baseType || idType]
const formatType = this.datatype_["format"]
const parseString = s => dataTypeParser(s, formatType)
const values = []
for (const s of stringValues) {
const [value, warning] = parseString(s)
if (!warning) {
this.validateRequired(value, row)
const valid = this.validateFormat(value, row) &&
this.validateLength(value, row) &&
this.validateValue(value, row)
values.push(valid ? value : { invalid: s })
} else {
this.error(warning, row, this.number_, s, this.datatype_)
values.push({ invalid: s })
}
} // for ...
return this.separator_
? values
: values[0]
} // validate
validateRequired (value, row) {
if (this.required_ && value === null) {
this.error('required', row, this.number_, value, { required: this.required_ })
return false
}
return true
} // validateRequired
validateLength (value, row) {
const { length, minLength, maxLength } = this.datatype_
if (!length && !minLength && !maxLength)
return true
let vlength = value.length
if (this.isBase64())
vlength = value.replace(/==?$/, '').length * 3 /4
if (this.isHexBinary())
vlength = value.length / 2
let valid = true
if (minLength && vlength < minLength) {
this.error('min_length', row, this.number_, value, { minLength })
valid = false
}
if (maxLength && vlength > maxLength) {
this.error('max_length', row, this.number_, value, { maxLength })
valid = false
}
if (length && vlength != length) {
this.error('length', row, this.number_, value, { length })
valid = false
}
return valid
} // validateLength
isBase64 () {
const base64 = 'http://www.w3.org/2001/XMLSchema#base64Binary'
return (this.datatype_["@id"] === base64 ) ||
(this.datatype_['base'] === base64)
} // isBase64
isHexBinary () {
const hexBinary = 'http://www.w3.org/2001/XMLSchema#hexBinary'
return (this.datatype_["@id"] === hexBinary ) ||
(this.datatype_['base'] === hexBinary)
} // isHexBinary
validateFormat (value, row) {
const { base, format } = this.datatype_
if (!format) return true
const valid = DATATYPE_FORMAT_VALIDATION[base](value, format)
if (!valid)
this.error('format', row, this.number_, value, { format })
return valid
} // validateFormat
validateValue (value, row) {
const { minInclusive, maxInclusive, minExclusive, maxExclusive } = this.datatype_
let v = i => i
if (value.dateTime) {
value = value.dateTime
v = i => i.dateTime
}
let valid = true
if (minInclusive && (value < v(minInclusive))) {
this.error('min_inclusive', row, this.number_, value, { minInclusive })
valid = false
}
if (maxInclusive && (value > v(maxInclusive))) {
this.error('max_inclusive', row, this.number_, value, { maxInclusive })
valid = false
}
if (minExclusive && (value <= v(minExclusive))) {
this.error('min_exclusive', row, this.number_, value, { minExclusive })
valid = false
}
if (maxExclusive && (value >= v(maxExclusive))) {
this.error('max_exclusive', row, this.number_, value, { maxExclusive })
valid = false
}
return valid
} // validateValue
warning (type, category, content, constraint) {
this.errors_.buildWarning(type, category, null, null, content, constraint)
} // warning
error (type, row, column, content, constraint) {
this.errors_.buildError(type, 'schema', row, column, content, constraint)
} // error
} // class Column
const REGEXP_VALIDATION = (value, format) => format.test(value)
const NO_ADDITIONAL_VALIDATION = (value, format) => true
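// Maps each datatype URI to its format check: regexp-backed types test the value against
// the supplied format pattern, while numeric and date/time types defer to their parsers.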
const DATATYPE_FORMAT_VALIDATION = {
"http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral": REGEXP_VALIDATION,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#HTML": REGEXP_VALIDATION,
"http://www.w3.org/ns/csvw#JSON": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#anyAtomicType": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#anyURI": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#base64Binary": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#boolean": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#date": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#dateTime": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#dateTimeStamp": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#decimal": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#integer": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#long": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#int": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#short": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#byte": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#nonNegativeInteger": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#positiveInteger": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#unsignedLong": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#unsignedInt": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#unsignedShort": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#unsignedByte": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#nonPositiveInteger": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#negativeInteger": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#double": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#duration": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#dayTimeDuration": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#yearMonthDuration": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#float": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#gDay": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#gMonth": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#gMonthDay": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#gYear": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#gYearMonth": NO_ADDITIONAL_VALIDATION,
"http://www.w3.org/2001/XMLSchema#hexBinary": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#QName": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#string": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#normalizedString": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#token": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#language": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#Name": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#NMTOKEN": REGEXP_VALIDATION,
"http://www.w3.org/2001/XMLSchema#time": NO_ADDITIONAL_VALIDATION
}
const TRIM_VALUE = (value, format) => { return [value.trim(), null] }
const ALL_VALUES_VALID = (value, format) => { return [value, null] }
function NUMERIC_PARSER(value, format, integer=false) {
if (format === null) {
format = NumberFormat(null, null, ".", integer)
}
const v = format.parse(value)
return (v !== null) ? [v, null] : [null, 'invalid_number']
} // NUMERIC_PARSER
function createDateParser(type, warning) {
return (value, format) => {
if (format === null) {
format = DateFormat(null, type)
}
const v = format.parse(value)
return (v !== null) ? [v, null] : [null, warning]
}
} // createDateParser
function createRegexpBasedParser(regexp, warning) {
return (value, format) => {
return (regexp.test(value)) ? [value, null] : [null, warning]
}
} // createRegexpBasedParser
function BOOLEAN_PARSER(value, format) {
if (format === null) {
if (["true", "1"].includes(value)) return [true, null]
if (["false", "0"].includes(value)) return [false, null]
} else {
if (value === format[0]) return [true, null]
if (value === format[1]) return [false, null]
}
return [value, 'invalid_boolean']
} // BOOLEAN_PARSER
function DECIMAL_PARSER(value, format) {
if (/(E|e|^(NaN|INF|-INF)$)/.test(value))
return [null, 'invalid_decimal']
return NUMERIC_PARSER(value, format)
} // DECIMAL_PARSER
function INTEGER_PARSER(value, format) {
const [v, w] = NUMERIC_PARSER(value, format, true)
if (w !== null) return [null, 'invalid_integer']
if (!Number.isInteger(v)) return [null, 'invalid_integer']
return [v, w]
} // INTEGER_PARSER
function LONG_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_long']
if (v > 9223372036854775807 || v < -9223372036854775808) return [null, 'invalid_long']
return [v, w]
} // LONG_PARSER
function INT_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_int']
if (v > 2147483647 || v < -2147483648) return [null, 'invalid_int']
return [v, w]
} // INT_PARSER
function SHORT_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_short']
if (v > 32767 || v < -32768) return [null, 'invalid_short']
return [v, w]
} // SHORT_PARSER
function BYTE_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_byte']
if (v > 127 || v < -128) return [null, 'invalid_byte']
return [v, w]
} // BYTE_PARSER
function NONNEGATIVE_INTEGER_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_nonNegativeInteger']
if (v < 0) return [null, 'invalid_nonNegativeInteger']
return [v, w]
} // NONNEGATIVE_INTEGER_PARSER
function POSITIVE_INTEGER_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_positiveInteger']
if (v <= 0) return [null, 'invalid_positiveInteger']
return [v, w]
} // POSITIVE_INTEGER_PARSER
function UNSIGNED_LONG_PARSER(value, format) {
const [v, w] = NONNEGATIVE_INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_unsignedLong']
if (v > 18446744073709551615) return [null, 'invalid_unsignedLong']
return [v, w]
} // UNSIGNED_LONG_PARSER
function UNSIGNED_INT_PARSER(value, format) {
const [v, w] = NONNEGATIVE_INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_unsignedInt']
if (v > 4294967295) return [null, 'invalid_unsignedInt']
return [v, w]
} // UNSIGNED_INT_PARSER
function UNSIGNED_SHORT_PARSER(value, format) {
const [v, w] = NONNEGATIVE_INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_unsignedShort']
if (v > 65535) return [null, 'invalid_unsignedShort']
return [v, w]
} // UNSIGNED_SHORT_PARSER
function UNSIGNED_BYTE_PARSER(value, format) {
const [v, w] = NONNEGATIVE_INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_unsignedByte']
if (v > 255) return [null, 'invalid_unsignedByte']
return [v, w]
} // UNSIGNED_BYTE_PARSER
function NONPOSITIVE_INTEGER_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_nonPositiveInteger']
if (v >= 0) return [null, 'invalid_nonPositiveInteger']
return [v, w]
} // NONPOSITIVE_INTEGER_PARSER
function NEGATIVE_INTEGER_PARSER(value, format) {
const [v, w] = INTEGER_PARSER(value, format)
if (w !== null) return [null, 'invalid_negativeInteger']
if (v > 0) return [null, 'invalid_negativeInteger']
return [v, w]
} // NEGATIVE_INTEGER_PARSER
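// Maps each datatype URI to a parser returning [parsedValue, null] on success
// or [null, 'invalid_*' warning type] on failure.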
const DATATYPE_PARSER = {
"http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral": TRIM_VALUE,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#HTML": TRIM_VALUE,
"http://www.w3.org/ns/csvw#JSON": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#anyAtomicType": ALL_VALUES_VALID,
"http://www.w3.org/2001/XMLSchema#anyURI": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#base64Binary": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#boolean": BOOLEAN_PARSER,
"http://www.w3.org/2001/XMLSchema#date":
createDateParser("http://www.w3.org/2001/XMLSchema#date", 'invalid_date'),
"http://www.w3.org/2001/XMLSchema#dateTime":
createDateParser("http://www.w3.org/2001/XMLSchema#dateTime", 'invalid_date_time'),
"http://www.w3.org/2001/XMLSchema#dateTimeStamp":
createDateParser("http://www.w3.org/2001/XMLSchema#dateTimeStamp", 'invalid_date_time_stamp'),
"http://www.w3.org/2001/XMLSchema#decimal": DECIMAL_PARSER,
"http://www.w3.org/2001/XMLSchema#integer": INTEGER_PARSER,
"http://www.w3.org/2001/XMLSchema#long": LONG_PARSER,
"http://www.w3.org/2001/XMLSchema#int": INT_PARSER,
"http://www.w3.org/2001/XMLSchema#short": SHORT_PARSER,
"http://www.w3.org/2001/XMLSchema#byte": BYTE_PARSER,
"http://www.w3.org/2001/XMLSchema#nonNegativeInteger": NONNEGATIVE_INTEGER_PARSER,
"http://www.w3.org/2001/XMLSchema#positiveInteger": POSITIVE_INTEGER_PARSER,
"http://www.w3.org/2001/XMLSchema#unsignedLong": UNSIGNED_LONG_PARSER,
"http://www.w3.org/2001/XMLSchema#unsignedInt": UNSIGNED_INT_PARSER,
"http://www.w3.org/2001/XMLSchema#unsignedShort": UNSIGNED_SHORT_PARSER,
"http://www.w3.org/2001/XMLSchema#unsignedByte": UNSIGNED_BYTE_PARSER,
"http://www.w3.org/2001/XMLSchema#nonPositiveInteger": NONPOSITIVE_INTEGER_PARSER,
"http://www.w3.org/2001/XMLSchema#negativeInteger": NEGATIVE_INTEGER_PARSER,
"http://www.w3.org/2001/XMLSchema#double": NUMERIC_PARSER,
// regular expressions here taken from XML Schema datatypes spec
"http://www.w3.org/2001/XMLSchema#duration":
createRegexpBasedParser(/-?P((([0-9]+Y([0-9]+M)?([0-9]+D)?|([0-9]+M)([0-9]+D)?|([0-9]+D))(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S)))?)|(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S))))/, 'invalid_duration'),
"http://www.w3.org/2001/XMLSchema#dayTimeDuration":
createRegexpBasedParser(/-?P(([0-9]+D(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S)))?)|(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S))))/, 'invalid_dayTimeDuration'),
"http://www.w3.org/2001/XMLSchema#yearMonthDuration":
createRegexpBasedParser(/-?P([0-9]+Y([0-9]+M)?|([0-9]+M))/, 'invalid_duration'),
"http://www.w3.org/2001/XMLSchema#float": NUMERIC_PARSER,
"http://www.w3.org/2001/XMLSchema#gDay":
createDateParser("http://www.w3.org/2001/XMLSchema#gDay", 'invalid_gDay'),
"http://www.w3.org/2001/XMLSchema#gMonth":
createDateParser("http://www.w3.org/2001/XMLSchema#gMonth", 'invalid_gMonth'),
"http://www.w3.org/2001/XMLSchema#gMonthDay":
createDateParser("http://www.w3.org/2001/XMLSchema#gMonthDay", 'invalid_gMonthDay'),
"http://www.w3.org/2001/XMLSchema#gYear":
createDateParser("http://www.w3.org/2001/XMLSchema#gYear", 'invalid_gYear'),
"http://www.w3.org/2001/XMLSchema#gYearMonth":
createDateParser("http://www.w3.org/2001/XMLSchema#gYearMonth", 'invalid_gYearMonth'),
"http://www.w3.org/2001/XMLSchema#hexBinary": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#QName": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#string": ALL_VALUES_VALID,
"http://www.w3.org/2001/XMLSchema#normalizedString": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#token": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#language": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#Name": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#NMTOKEN": TRIM_VALUE,
"http://www.w3.org/2001/XMLSchema#time":
createDateParser("http://www.w3.org/2001/XMLSchema#time", 'invalid_time')
}
module.exports = (...args) => new Column(...args)
module.exports.fromJson = Column.fromJson
<file_sep>/* eslint-env mocha */
const expect = require('chai').expect
const CsvlintCsvwDateFormat = require('../../lib/csvlint/csvw/date-format')
describe('Csvlint::Csvw::DateFormat', () => {
it('should parse dates that match yyyy-MM-dd correctly', () => {
const format = CsvlintCsvwDateFormat('yyyy-MM-dd')
expect(format.parse('2015-03-22').dateTime).to.eql(new Date(2015, 2, 22))
expect(format.parse('2015-02-30')).to.eql(null)
expect(format.parse('22/03/2015')).to.eql(null)
})
it('should parse times that match HH:mm:ss correctly', () => {
const format = CsvlintCsvwDateFormat('HH:mm:ss')
expect(format.parse('12:34:56')).to.eql({ hour: 12, minute: 34, second: 56.0, string: '12:34:56', dateTime: new Date(0, 1, 1, 12, 34, 56.0, '+00:00') })
expect(format.parse('22/03/2015')).to.eql(null)
})
it('should parse times that match HH:mm:ss.SSS correctly', () => {
const format = CsvlintCsvwDateFormat('HH:mm:ss.SSS')
expect(format.parse('12:34:56')).to.eql({ hour: 12, minute: 34, second: 56.0, string: '12:34:56', dateTime: new Date(0, 1, 1, 12, 34, 56.0, '+00:00') })
expect(format.parse('12:34:56.78')).to.eql({ hour: 12, minute: 34, second: 56.78, string: '12:34:56.78', dateTime: new Date(0, 1, 1, 12, 34, 56.78, '+00:00') })
expect(format.parse('12:34:56.789')).to.eql({ hour: 12, minute: 34, second: 56.789, string: '12:34:56.789', dateTime: new Date(0, 1, 1, 12, 34, 56.789, '+00:00') })
expect(format.parse('12:34:56.7890')).to.eql(null)
expect(format.parse('22/03/2015')).to.eql(null)
})
it('should parse dateTimes that match yyyy-MM-ddTHH:mm:ss correctly', () => {
const format = CsvlintCsvwDateFormat('yyyy-MM-ddTHH:mm:ss')
expect(format.parse('2015-03-15T15:02:37').dateTime).to.eql(new Date(2015, 2, 15, 15, 2, 37))
expect(format.parse('12:34:56')).to.eql(null)
expect(format.parse('22/03/2015')).to.eql(null)
})
it('should parse dateTimes that match yyyy-MM-ddTHH:mm:ss.S correctly', () => {
const format = CsvlintCsvwDateFormat('yyyy-MM-ddTHH:mm:ss.S')
expect(format.parse('2015-03-15T15:02:37').dateTime).to.eql(new Date(2015, 2, 15, 15, 2, 37.0))
expect(format.parse('2015-03-15T15:02:37.4').dateTime).to.eql(new Date(2015, 2, 15, 15, 2, 37, 400))
expect(format.parse('2015-03-15T15:02:37.45')).to.eql(null)
expect(format.parse('12:34:56')).to.eql(null)
expect(format.parse('22/03/2015')).to.eql(null)
})
it('should parse dateTimes that match M/d/yyyy HH:mm correctly', () => {
const format = CsvlintCsvwDateFormat('M/d/yyyy HH:mm')
expect(format.parse('2015-03-15T15:02:37')).to.eql(null)
expect(format.parse('3/15/2015 15:02').dateTime).to.eql(new Date(2015, 2, 15, 15, 2))
})
})
<file_sep>function metadataError (path, msg = '') {
const fullMsg = `${path} ${msg}`
const err = new Error(fullMsg)
err.name = 'Metadata Error'
throw err
}
module.exports = metadataError
<file_sep>const XRegExp = require('xregexp')
const { DateTime } = require('luxon')
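// Builds a regexp for a date/time pattern (e.g. 'yyyy-MM-dd'), or a default regexp for a
// given XSD datatype when no pattern is supplied, and parses values against it.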
class DateFormat {
get pattern () { return this.pattern_ }
constructor (pattern, datatype = null) {
this.pattern_ = pattern
if (this.pattern_ === null) {
this.regexp_ = DEFAULT_REGEXP[datatype]
this.type_ = datatype
} else {
/*
test_pattern = pattern.clone
test_pattern.gsub!(/S+/, "")
FIELDS.keys.sort_by{|f| -f.length}.each do |field|
test_pattern.gsub!(field, "")
end
raise Csvw::DateFormatError, "unrecognised date field symbols in date format" if test_pattern =~ /[GyYuUrQqMLlwWdDFgEecahHKkjJmsSAzZOvVXx]/
*/
this.regexp_ = DATE_PATTERN_REGEXP[this.pattern_]
this.type_ = !this.regexp_ ? 'http://www.w3.org/2001/XMLSchema#time' : 'http://www.w3.org/2001/XMLSchema#date'
this.regexp_ = this.regexp_ || TIME_PATTERN_REGEXP[this.pattern_]
this.type_ = !this.regexp_ ? 'http://www.w3.org/2001/XMLSchema#dateTime' : this.type_
this.regexp_ = this.regexp_ || DATE_TIME_PATTERN_REGEXP[this.pattern_]
if (!this.regexp_) {
let regexp = this.pattern_
const hasYear = /yyyy/.test(regexp)
const hasHour = /HH/.test(regexp)
if (hasYear && !hasHour) this.type_ = 'http://www.w3.org/2001/XMLSchema#date'
if (!hasYear && hasHour) this.type_ = 'http://www.w3.org/2001/XMLSchema#time'
if (hasYear && hasHour) this.type_ = 'http://www.w3.org/2001/XMLSchema#dateTime'
regexp = regexp.replace('HH', FIELDS.HH)
regexp = regexp.replace('mm', FIELDS.mm)
if (/ss\.S+/.test(this.pattern_)) {
const maxFractionalSeconds = this.pattern_.split('.').reverse()[0].length
regexp = regexp.replace(/ss\.S+$/, `(?<second>${FIELDS.ss}(\\.[0-9]{1,${maxFractionalSeconds}})?)`)
} else {
regexp = regexp.replace('ss', `(?<second>${FIELDS.ss})`)
}
if (hasYear) {
for (const field of ['yyyy', 'MM', 'M', 'dd']) {
regexp = regexp.replace(field, FIELDS[field])
}
regexp = regexp.replace(/d(?=[-T \/\.])/, FIELDS.d)
}
for (const field of ['XXX', 'XX', 'X', 'xxx', 'xx']) {
regexp = regexp.replace(field, FIELDS[field])
}
regexp = regexp.replace(/x(?!:)/, FIELDS.x)
this.regexp_ = XRegExp(`^${regexp}$`)
}
}
} // constructor
match (value) {
return this.regexp_.test(value)
} // match
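// Parses an input string against this format. Returns null when it doesn't match;
// otherwise returns an object with the captured numeric fields (year, month, day, hour,
// minute, second, timezone), a normalised string representation, and a JS Date in dateTime.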
parse (input) {
const match = XRegExp.exec(input, this.regexp_)
if (match === null) {
return null
}
const value = {}
for (const field of this.regexp_.xregexp.captureNames) {
if (field === null || match[field] === null) continue
switch (field) {
case 'timezone': {
let tz = match.timezone
if (tz === 'Z') tz = '+00:00'
if (tz.length === 3) tz += ':00'
if (!/:/.test(tz)) tz = `${tz.substring(0, 3)}:${tz.substring(3)}`
value.timezone = tz
}
break
case 'second': {
value.second = Number.parseFloat(match.second)
}
break
default:
value[field] = Number.parseInt(match[field])
}
}
switch (this.type_) {
case 'http://www.w3.org/2001/XMLSchema#date': {
const date = DateTime.fromObject({
year: value.year,
month: value.month,
day: value.day
})
if (!date.isValid) return null
value.dateTime = date.toJSDate()
}
break
case 'http://www.w3.org/2001/XMLSchema#dateTime': {
const valueSecond = value.second ? value.second : 0
const second = Math.floor(valueSecond)
const millisecond = Math.round((valueSecond - second) * 1000)
const date = DateTime.fromObject({
year: value.year,
month: value.month,
day: value.day,
hour: value.hour,
minute: value.minute,
second: second,
millisecond: millisecond,
zone: match.timezone && Number.parseInt(match.timezone) ? match.timezone : 'local'
})
if (!date.isValid) return null
value.dateTime = date.toJSDate()
}
break
default:
value.dateTime = new Date(
value.year || 0,
value.month || 1,
value.day || 1,
value.hour || 0,
value.minute || 0,
value.second || 0,
value.timezone || '+00:00'
)
}
if (value.year) {
if (value.month) {
if (value.day) {
if (value.hour) {
// dateTime
value.string = `${year(value)}-${month(value)}-${day(value)}T${hour(value)}:${minute(value, 0)}:${second(value)}${timezone(value)}`
} else {
// date
value.string = `${year(value)}-${month(value)}-${day(value)}${timezone(value)}`
}
} else {
// gYearMonth
value.string = `${year(value)}-${month(value)}${timezone(value)}`
}
} else {
// gYear
value.string = `${year(value)}${timezone(value)}`
}
} else if (value.month) {
if (value.day) {
// gMonthDay
value.string = `--${month(value)}-${day(value)}${timezone(value)}`
} else {
// gMonth
value.string = `--${month(value)}${timezone(value)}`
}
} else if (value.day) {
// gDay
value.string = `---${day(value)}${timezone(value)}`
} else {
value.string = `${hour(value)}:${minute(value)}:${second(value)}${timezone(value)}`
}
return value
} // parse
} // class DateFormat
function year (value) { return formatNumber(value.year, 4) }
function month (value) { return formatNumber(value.month, 2) }
function day (value) { return formatNumber(value.day, 2) }
function hour (value) { return formatNumber(value.hour, 2) }
function minute (value, def) { return formatNumber(value.minute || def, 2) }
function second (value) { return formatNumber(value.second, 2) }
function timezone (value) { return value.timezone ? value.timezone.replace('+00:00', 'Z') : '' }
function formatNumber (n, width) {
return `${n}`.padStart(width, 0)
}
function dateFormatError (msg = '') {
const err = new Error(msg)
err.name = 'Date Format Error'
throw err
} // dateFormatError
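// Named-capture regexp fragments for each supported pattern symbol (yyyy, MM, M, dd, d,
// HH, mm, ss, and the X/x timezone variants).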
const FIELDS = {
yyyy: '(?<year>-?([1-9][0-9]{3,}|0[0-9]{3}))',
MM: '(?<month>0[1-9]|1[0-2])',
M: '(?<month>[1-9]|1[0-2])',
dd: '(?<day>3[01]|[12][0-9]|0[1-9])',
d: '(?<day>3[01]|[12][0-9]|[1-9])',
HH: '(?<hour>[01][0-9]|2[0-3])',
mm: '(?<minute>[0-5][0-9])',
ss: '([0-6][0-9])',
X: '(?<timezone>Z|[-+]((0[0-9]|1[0-3])([0-5][0-9])?|14(00)?))',
XX: '(?<timezone>Z|[-+]((0[0-9]|1[0-3])[0-5][0-9]|1400))',
XXX: '(?<timezone>Z|[-+]((0[0-9]|1[0-3]):[0-5][0-9]|14:00))',
x: '(?<timezone>[-+]((0[0-9]|1[0-3])([0-5][0-9])?|14(00)?))',
xx: '(?<timezone>[-+]((0[0-9]|1[0-3])[0-5][0-9]|1400))',
xxx: '(?<timezone>[-+]((0[0-9]|1[0-3]):[0-5][0-9]|14:00))'
}
const DATE_PATTERN_REGEXP = {
'yyyy-MM-dd': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}$`),
yyyyMMdd: XRegExp(`^${FIELDS.yyyy}${FIELDS.MM}${FIELDS.dd}$`),
'dd-MM-yyyy': XRegExp(`^${FIELDS.dd}-${FIELDS.MM}-${FIELDS.yyyy}$`),
'd-M-yyyy': XRegExp(`^${FIELDS.d}-${FIELDS.M}-${FIELDS.yyyy}$`),
'MM-dd-yyyy': XRegExp(`^${FIELDS.MM}-${FIELDS.dd}-${FIELDS.yyyy}$`),
'M-d-yyyy': XRegExp(`^${FIELDS.M}-${FIELDS.d}-${FIELDS.yyyy}$`),
'dd/MM/yyyy': XRegExp(`^${FIELDS.dd}/${FIELDS.MM}/${FIELDS.yyyy}$`),
'd/M/yyyy': XRegExp(`^${FIELDS.d}/${FIELDS.M}/${FIELDS.yyyy}$`),
'MM/dd/yyyy': XRegExp(`^${FIELDS.MM}/${FIELDS.dd}/${FIELDS.yyyy}$`),
'M/d/yyyy': XRegExp(`^${FIELDS.M}/${FIELDS.d}/${FIELDS.yyyy}$`),
'dd.MM.yyyy': XRegExp(`^${FIELDS.dd}.${FIELDS.MM}.${FIELDS.yyyy}$`),
'd.M.yyyy': XRegExp(`^${FIELDS.d}.${FIELDS.M}.${FIELDS.yyyy}$`),
'MM.dd.yyyy': XRegExp(`^${FIELDS.MM}.${FIELDS.dd}.${FIELDS.yyyy}$`),
'M.d.yyyy': XRegExp(`^${FIELDS.M}.${FIELDS.d}.${FIELDS.yyyy}$`)
}
const TIME_PATTERN_REGEXP = {
'HH:mm:ss': XRegExp(`^${FIELDS.HH}:${FIELDS.mm}:(?<second>${FIELDS.ss})$`),
HHmmss: XRegExp(`^${FIELDS.HH}${FIELDS.mm}(?<second>${FIELDS.ss})$`),
'HH:mm': XRegExp(`^${FIELDS.HH}:${FIELDS.mm}$`),
HHmm: XRegExp(`^${FIELDS.HH}${FIELDS.mm}$`)
}
const DATE_TIME_PATTERN_REGEXP = {
'yyyy-MM-ddTHH:mm:ss': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}T${FIELDS.HH}:${FIELDS.mm}:(?<second>${FIELDS.ss})$`),
'yyyy-MM-ddTHH:mm': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}T${FIELDS.HH}:${FIELDS.mm}$`)
}
const DEFAULT_REGEXP = {
'http://www.w3.org/2001/XMLSchema#date': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#dateTime': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}T${FIELDS.HH}:${FIELDS.mm}:(?<second>${FIELDS.ss}(\\.[0-9]+)?)${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#dateTimeStamp': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}-${FIELDS.dd}T${FIELDS.HH}:${FIELDS.mm}:(?<second>${FIELDS.ss}(\\.[0-9]+)?)${FIELDS.XXX}$`),
'http://www.w3.org/2001/XMLSchema#gDay': XRegExp(`^---${FIELDS.dd}${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#gMonth': XRegExp(`^--${FIELDS.MM}${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#gMonthDay': XRegExp(`^--${FIELDS.MM}-${FIELDS.dd}${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#gYear': XRegExp(`^${FIELDS.yyyy}${FIELDS.XXX}?$`),
'http://www.w3.org/2001/XMLSchema#gYearMonth': XRegExp(`^${FIELDS.yyyy}-${FIELDS.MM}${FIELDS.XXX}?$`),
  'http://www.w3.org/2001/XMLSchema#time': XRegExp(`^${FIELDS.HH}:${FIELDS.mm}:(?<second>${FIELDS.ss}(\\.[0-9]+)?)${FIELDS.XXX}?$`)
}
module.exports = (pattern, datetype) =>
new DateFormat(pattern, datetype)
<file_sep>/* eslint-env mocha */
const expect = require('chai').expect
const CsvlintField = require('../lib/csvlint/field')
describe('Csvlint::Field', () => {
it('should validate required fields', () => {
const field = new CsvlintField('test', { required: true })
expect(field.validateColumn(null)).to.eql(false)
expect(field.errors[0].category).to.eql('schema')
expect(field.validateColumn('')).to.eql(false)
expect(field.validateColumn('data')).to.eql(true)
})
it('should include the failed constraints', () => {
const field = new CsvlintField('test', { required: true })
expect(field.validateColumn(null)).to.eql(false)
expect(field.errors[0].constraints).to.eql({ required: true })
})
it('should validate minimum length', () => {
const field = new CsvlintField('test', { minLength: 3 })
expect(field.validateColumn(null)).to.eql(false)
expect(field.validateColumn('')).to.eql(false)
expect(field.validateColumn('ab')).to.eql(false)
expect(field.validateColumn('abc')).to.eql(true)
expect(field.validateColumn('abcd')).to.eql(true)
})
it('should validate maximum length', () => {
const field = new CsvlintField('test', { maxLength: 3 })
expect(field.validateColumn(null)).to.eql(true)
expect(field.validateColumn('')).to.eql(true)
expect(field.validateColumn('ab')).to.eql(true)
expect(field.validateColumn('abc')).to.eql(true)
expect(field.validateColumn('abcd')).to.eql(false)
})
it('should validate against regex', () => {
const field = new CsvlintField('test', { pattern: '\\{[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}\\}' })
expect(field.validateColumn('abc')).to.eql(false)
expect(field.validateColumn('{3B0DA29C-C89A-4FAA-918A-0000074FA0E0}')).to.eql(true)
})
it('should apply combinations of constraints', () => {
const field = new CsvlintField('test', {
required: true,
pattern: '\\{[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}\\}'
})
expect(field.validateColumn('abc')).to.eql(false)
expect(field.errors[0].constraints).to.eql({ pattern: '\\{[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}\\}' })
expect(field.validateColumn(null)).to.eql(false)
expect(field.errors[0].constraints).to.eql({ required: true })
expect(field.validateColumn('{3B0DA29C-C89A-4FAA-918A-0000074FA0E0}')).to.eql(true)
})
it('should enforce uniqueness for a column', () => {
const field = new CsvlintField('test', { unique: true })
expect(field.validateColumn('abc')).to.eql(true)
expect(field.validateColumn('abc')).to.eql(false)
expect(field.errors[0].category).to.eql('schema')
expect(field.errors[0].type).to.eql('unique')
})
describe('validate correct types', () => {
it('skips empty fields', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#int' })
expect(field.validateColumn('')).to.eql(true)
})
it('validates strings', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#string' })
expect(field.validateColumn('42')).to.eql(true)
expect(field.validateColumn('forty-two')).to.eql(true)
})
it('validates ints', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#int' })
expect(field.validateColumn('42')).to.eql(true)
expect(field.validateColumn('42.1')).to.eql(false)
expect(field.validateColumn('forty-two')).to.eql(false)
expect(field.validateColumn('42 is forty-two')).to.eql(false)
})
it('validates integers', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#integer' })
expect(field.validateColumn('42')).to.eql(true)
expect(field.validateColumn('forty-two')).to.eql(false)
})
it('validates floats', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#float' })
expect(field.validateColumn('42.0')).to.eql(true)
expect(field.validateColumn('42')).to.eql(true)
expect(field.validateColumn('42.')).to.eql(false)
expect(field.validateColumn('forty-two')).to.eql(false)
expect(field.validateColumn('42 is forty-two')).to.eql(false)
})
it('validates URIs', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#anyURI' })
expect(field.validateColumn('http://theodi.org/team')).to.eql(true)
expect(field.validateColumn('https://theodi.org/team')).to.eql(true)
expect(field.validateColumn('42.0')).to.eql(false)
})
it('works with invalid URIs', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#anyURI' })
expect(field.validateColumn('ยฃ123')).to.eql(false)
})
it('validates booleans', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#boolean' })
expect(field.validateColumn('true')).to.eql(true)
expect(field.validateColumn('1')).to.eql(true)
expect(field.validateColumn('false')).to.eql(true)
expect(field.validateColumn('0')).to.eql(true)
expect(field.validateColumn('derp')).to.eql(false)
})
describe('should validate all kinds of integers', () => {
it('validates a non-positive integer', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#nonPositiveInteger' })
expect(field.validateColumn('0')).to.eql(true)
expect(field.validateColumn('-1')).to.eql(true)
expect(field.validateColumn('1')).to.eql(false)
})
it('validates a negative integer', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#negativeInteger' })
expect(field.validateColumn('0')).to.eql(false)
expect(field.validateColumn('-1')).to.eql(true)
expect(field.validateColumn('1')).to.eql(false)
})
it('validates a non-negative integer', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#nonNegativeInteger' })
expect(field.validateColumn('0')).to.eql(true)
expect(field.validateColumn('-1')).to.eql(false)
expect(field.validateColumn('1')).to.eql(true)
})
it('validates a positive integer', () => {
const field = new CsvlintField('test', { type: 'http://www.w3.org/2001/XMLSchema#positiveInteger' })
expect(field.validateColumn('0')).to.eql(false)
expect(field.validateColumn('-1')).to.eql(false)
expect(field.errors[0].constraints).to.eql({ type: 'http://www.w3.org/2001/XMLSchema#positiveInteger' })
expect(field.validateColumn('1')).to.eql(true)
})
})
describe('when validating ranges', () => {
it('should enforce minimum values', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#int',
minimum: '40'
})
expect(field.validateColumn('42')).to.eql(true)
expect(field.validateColumn('39')).to.eql(false)
expect(field.errors[0].type).to.eql('below_minimum')
})
it('should enforce maximum values', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#int',
maximum: '40'
})
expect(field.validateColumn('39')).to.eql(true)
expect(field.validateColumn('41')).to.eql(false)
expect(field.errors[0].type).to.eql('above_maximum')
})
})
describe('when validating dates', () => {
it('should validate a date time', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#dateTime'
})
expect(field.validateColumn('2014-02-17T11:09:00Z')).to.eql(true)
expect(field.validateColumn('invalid-date')).to.eql(false)
expect(field.validateColumn('2014-02-17')).to.eql(false)
})
it('should validate a date', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#date'
})
expect(field.validateColumn('2014-02-17T11:09:00Z')).to.eql(false)
expect(field.validateColumn('invalid-date')).to.eql(false)
expect(field.validateColumn('2014-02-17')).to.eql(true)
})
it('should validate a time', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#time'
})
expect(field.validateColumn('11:09:00')).to.eql(true)
expect(field.validateColumn('2014-02-17T11:09:00Z')).to.eql(false)
expect(field.validateColumn('not-a-time')).to.eql(false)
expect(field.validateColumn('27:97:00')).to.eql(false)
})
it('should validate a year', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#gYear'
})
expect(field.validateColumn('1999')).to.eql(true)
expect(field.validateColumn('2525')).to.eql(true)
expect(field.validateColumn('0001')).to.eql(true)
expect(field.validateColumn('2014-02-17T11:09:00Z')).to.eql(false)
expect(field.validateColumn('not-a-time')).to.eql(false)
expect(field.validateColumn('27:97:00')).to.eql(false)
})
it('should validate a year-month', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#gYearMonth'
})
expect(field.validateColumn('1999-12')).to.eql(true)
expect(field.validateColumn('2525-01')).to.eql(true)
expect(field.validateColumn('2014-02-17T11:09:00Z')).to.eql(false)
expect(field.validateColumn('not-a-time')).to.eql(false)
expect(field.validateColumn('27:97:00')).to.eql(false)
})
it('should allow user to specify custom date time pattern', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#dateTime',
datePattern: '%Y-%m-%d %H:%M:%S'
})
expect(field.validateColumn('1999-12-01 10:00:00')).to.eql(true)
expect(field.validateColumn('invalid-date')).to.eql(false)
expect(field.validateColumn('2014-02-17')).to.eql(false)
expect(field.errors[0].constraints).to.eql({
type: 'http://www.w3.org/2001/XMLSchema#dateTime',
datePattern: '%Y-%m-%d %H:%M:%S'
})
})
it('should allow user to compare dates', () => {
const field = new CsvlintField('test', {
type: 'http://www.w3.org/2001/XMLSchema#dateTime',
datePattern: '%Y-%m-%d %H:%M:%S',
minimum: '1990-01-01 10:00:00'
})
expect(field.validateColumn('1999-12-01 10:00:00')).to.eql(true)
expect(field.validateColumn('1989-12-01 10:00:00')).to.eql(false)
})
})
})
})
<file_sep>const ErrorMessage = require('./error-message')
class ErrorCollector {
constructor (initial = null) {
this.reset()
if (initial) {
this.errors_.push(...initial.errors)
this.warnings_.push(...initial.warnings)
this.infoMessages_.push(...initial.infoMessages)
}
} // constructor
get errors () { return this.errors_ }
get warnings () { return this.warnings_ }
get infoMessages () { return this.infoMessages_ }
get isValid () { return this.errors_.length === 0 }
buildError (
type,
category = null,
row = null,
column = null,
content = null,
constraints = {}
) {
this.errors_.push(new ErrorMessage(type, category, row, column, content, constraints))
} // buildError
buildWarning (
type,
category = null,
row = null,
column = null,
content = null,
constraints = {}
) {
this.warnings_.push(new ErrorMessage(type, category, row, column, content, constraints))
} // buildWarning
buildInfoMessage (
type,
category = null,
row = null,
column = null,
content = null,
constraints = {}
) {
this.infoMessages_.push(new ErrorMessage(type, category, row, column, content, constraints))
  } // buildInfoMessage
reset () {
this.errors_ = []
this.warnings_ = []
this.infoMessages_ = []
} // reset
} // ErrorCollector
module.exports = ErrorCollector
<file_sep>const Schema = require('./schema')
const ErrorCollector = require('./error-collector')
const fs = require('fs')
const csvparse = require('csv-parse')
const fetch = require('node-fetch')
const defaultDialect = {
header: true,
headerRowCount: 1,
delimiter: ',',
skipInitialSpace: true,
lineTerminator: 'auto',
quoteChar: '"',
trim: true
}
class Validator {
constructor (
source,
dialect = {},
schema = null,
options = {}
) {
this.source_ = source
this.formats_ = []
this.schema_ = schema
this.dialect_ = dialect
this.csvHeader_ = true
this.headers_ = {}
this.lambda_ = options.lambda
    this.validate_ = options.validate !== undefined ? options.validate : true // default to true, but honour an explicit false
this.limitLines_ = options.limit_lines
// this.extension_ = parseExtension(source)
this.expectedColumns_ = 0
this.colCounts_ = []
this.lineBreaksReported_ = false
this.lineBreaks_ = new Set()
this.errors_ = new ErrorCollector(schema)
this.data_ = []
} // constructor
get encoding () { return this.encoding_ }
get contentType () { return this.contentType_ }
get extension () { return this.extension_ }
get headers () { return this.headers_ }
get linkHeaders () { return this.linkHeaders_ }
get dialect () { return this.dialect_ }
get hasHeader () { return this.csvHeader_ && this.dialect.header }
get csvOptions () { return this.csvOptions_ }
get schema () { return this.schema_ }
get data () { return this.data_ }
get rowCount () { return this.data_.length }
get errors () { return this.errors_.errors }
get warnings () { return this.errors_.warnings }
get infoMessages () { return this.errors_.infoMessages }
get isValid () { return this.errors_.isValid }
  get lineBreaksReported () { return this.lineBreaksReported_ }
async validate () {
// excel warning
this.locateSchema()
this.setDialect()
const [headers, sourceStream] =
await this.openSourceStream(this.source_)
this.validateMetaData(headers)
await this.validateStream(sourceStream)
this.finish()
} // validate
async validateStream (sourceStream) {
const parser = csvparse({
skip_lines_with_error: true,
on_record: (record, ctx) => {
this.validateLine(record, ctx.lines, currentLine)
this.data_.push(record)
if (this.lambda_) this.lambda_(record, ctx.lines)
}
})
let currentLine
parser.on('skip', err => {
if (this.buildExceptionMessage(err, currentLine)) {
this.data_.push(null)
}
})
for await (const line of chunksToLines(sourceStream)) {
currentLine = line
parser.write(line)
} // for ...
parser.end()
  } // validateStream
validateLine (record, lineNumber = null, lineContents) {
this.reportLineBreaks(lineContents)
const colCount = record.filter(col => col).length
this.colCounts_.push(colCount)
if (lineNumber <= 1 && this.csvHeader_) {
this.validateHeader(record)
return
}
    this.buildFormats(record)
this.expectedColumns_ = this.expectedColumns_ || record.length
if (colCount === 0) {
this.buildError('blank_rows', 'structure', lineNumber, null, lineContents)
}
// Builds errors and warnings related to the provided schema file
if (this.schema_) {
this.schema_.validateRow(record, lineNumber, [], this.source_, this.validate_) // not entirely sure what allErrors is doing here
this.errors_.errors.push(...this.schema_.errors)
this.errors_.warnings.push(...this.schema_.warnings)
} else {
if (record.length !== this.expectedColumns_) {
this.buildError('ragged_rows', 'structure', lineNumber, null, lineContents)
}
}
} // validateLine
reportLineBreaks (lineContents) {
const l = lineContents.length
if ((l === 0) || (lineContents.substring(l-1) !== '\n')) return
// Return straight away if there's no newline character - i.e. we're on the last line
const lineBreak = getLineBreak(lineContents)
this.lineBreaks_.add(lineBreak)
if (!this.lineBreaksReported_) {
if (this.csvOptions_.rowSep !== 'auto' && !lineContents.endsWith(this.csvOptions_.rowSep))
this.buildError('line_breaks', 'structure')
if (lineBreak !== '\r\n') {
this.buildInfoMessage('nonrfc_line_breaks', 'structure')
this.lineBreaksReported_ = true
}
}
} // reportLineBreaks
validateHeader (header) {
const names = new Set()
if (this.dialect.trim) {
header = header.map(h => h.trim())
}
const warning = (type, index) => this.buildWarning(type, 'schema', null, index + 1)
header.forEach((h, index) => {
if (!h) {
warning('empty_column_name', index)
}
if (names.has(h)) {
warning('duplicate_column_name', index)
}
names.add(h)
})
return this.isValid
} // validateHeader
validateMetaData (headers) {
this.validateContentTypeMetadata(headers)
this.validateLinkHeaders(headers)
} // validateMetadata
validateContentTypeMetadata (headers) {
let assumedHeader = !this.suppliedDialect_
if (Object.keys(headers).length) {
const { contentType, isTextCsv, headerPresent } =
crackContentType(headers)
this.contentType_ = contentType
if (isTextCsv) {
this.csvHeader_ = this.csvHeader_ && true
assumedHeader = this.assumedHeader_
}
if (headerPresent) {
if (headerPresent === 'present') this.csvHeader_ = true
if (headerPresent === 'absent') this.csvHeader_ = false
assumedHeader = false
}
if (!this.contentType_) {
this.buildWarning('no_content_type', 'context')
}
if (this.contentType_ && !isTextCsv) {
this.buildError('wrong_content_type', 'context')
}
} // if (headers.length)
if (assumedHeader) {
this.buildInfoMessage('assumed_header', 'structure')
}
} // validateContentTypeMetadata
validateLinkHeaders (headers) {
/*
@link_headers = @headers["link"].split(",") rescue nil
@link_headers.each do |link_header|
match = LINK_HEADER_REGEXP.match(link_header)
uri = match["uri"].gsub(/(^\<|\>$)/, "") rescue nil
rel = match["rel-relationship"].gsub(/(^\"|\"$)/, "") rescue nil
param = match["param"]
param_value = match["param-value"].gsub(/(^\"|\"$)/, "") rescue nil
if rel == "describedby" && param == "type" && ["application/csvm+json", "application/ld+json", "application/json"].include?(param_value)
begin
url = URI.join(@source_url, uri)
schema = Schema.load_from_uri(url)
if schema.instance_of? Csvlint::Csvw::TableGroup
if schema.tables[@source_url]
@schema = schema
else
warn_if_unsuccessful = true
build_warnings(:schema_mismatch, :context, nil, nil, @source_url, schema)
end
end
rescue OpenURI::HTTPError
end
end
end if @link_headers
*/
} // validateLinkHeaders
finish () {
// sum = @col_counts.inject(:+)
// unless sum.nil?
// build_warnings(:title_row, :structure) if @col_counts.first < (sum / @col_counts.size.to_f)
// end
// # return expected_columns to calling class
// build_warnings(:check_options, :structure) if @expected_columns == 1
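    // A hedged JS sketch of the Ruby above (assumption - kept commented out, since the original leaves it unimplemented):
    //   const sum = this.colCounts_.reduce((a, b) => a + b, 0)
    //   if (this.colCounts_.length && this.colCounts_[0] < sum / this.colCounts_.length) this.buildWarning('title_row', 'structure')
    //   if (this.expectedColumns_ === 1) this.buildWarning('check_options', 'structure')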
this.checkConsistency()
// check_foreign_keys if @validate
this.checkMixedLinebreaks()
// validate_encoding
} // finish
locateSchema () {
if (this.schema_) return
/*
@source_url = nil
warn_if_unsuccessful = false
case @source
when StringIO
return
when File
@source_url = "file:#{URI.encode(File.expand_path(@source))}"
else
@source_url = @source
end
unless @schema.nil?
if @schema.tables[@source_url]
return
else
@schema = nil
end
end
paths = []
if @source_url =~ /^http(s)?/
begin
well_known_uri = URI.join(@source_url, "/.well-known/csvm")
paths = open(well_known_uri).read.split("\n")
rescue OpenURI::HTTPError, URI::BadURIError
end
end
paths = ["{+url}-metadata.json", "csv-metadata.json"] if paths.empty?
paths.each do |template|
begin
template = URITemplate.new(template)
path = template.expand('url' => @source_url)
url = URI.join(@source_url, path)
url = File.new(url.to_s.sub(/^file:/, "")) if url.to_s =~ /^file:/
schema = Schema.load_from_uri(url)
if schema.instance_of? Csvlint::Csvw::TableGroup
if schema.tables[@source_url]
@schema = schema
return
else
warn_if_unsuccessful = true
build_warnings(:schema_mismatch, :context, nil, nil, @source_url, schema)
end
end
rescue Errno::ENOENT
rescue OpenURI::HTTPError, URI::BadURIError, ArgumentError
rescue => e
raise e
end
end
build_warnings(:schema_mismatch, :context, nil, nil, @source_url, schema) if warn_if_unsuccessful
@schema = nil
*/
} // locateSchema
setDialect () {
this.assumedHeader_ = !this.dialect_.header
this.suppliedDialect_ = Object.keys(this.dialect_).length !== 0
const schemaDialect = { }
this.dialect_ = buildDialect(schemaDialect, this.dialect_)
this.csvHeader_ = this.csvHeader_ && this.dialect_.header
this.csvOptions_ = this.dialectToCsvOptions(this.dialect_)
} // setDialect
dialectToCsvOptions (dialect) {
    const skipInitialSpace = dialect.skipInitialSpace !== undefined ? dialect.skipInitialSpace : true // honour an explicit false
const delimiter = dialect.delimiter + (!skipInitialSpace ? ' ' : '')
const rowSep = dialect.lineTerminator
const quoteChar = dialect.quoteChar
return {
colSep: delimiter,
rowSep: rowSep,
quoteChar: quoteChar,
skipBlanks: false
}
} // dialectToCsvOptions
async openSourceStream (source) {
if (source.indexOf('\n') !== -1) { // multiline text
return [{}, [source]]
}
if (source.indexOf('http') === 0) {
const response = await fetch(source)
return [
response.headers.raw(),
response.body
]
}
return [{}, fs.createReadStream(source)]
} // openSourceStream
buildFormats (record) {
record.forEach((col, i) => {
if (col === null) return
this.formats_[i] = this.formats_[i] || { }
const format = columnFormat(col)
this.formats_[i][format] = this.formats_[i][format] || 0
this.formats_[i][format] += 1
})
} // buildFormats
checkConsistency () {
this.formats_.forEach((format, i) => {
const counts = Object.values(format).filter(c => c).length
if (counts > 1)
this.buildWarning('inconsistentValues', 'schema', null, i+1)
})
} // checkConsistency
checkMixedLinebreaks () {
if (this.lineBreaks_.size > 1) {
this.buildLinebreakError();
}
} // checkMixedLinebreaks
buildLinebreakError () {
const hasLineBreakErrors = this.errors.some(e => e.type === 'lineBreaks')
if (!hasLineBreakErrors)
      this.buildError('lineBreaks', 'structure')
} // buildLinebreakError
buildInfoMessage (type, category) {
this.errors_.buildInfoMessage(type, category)
} // buildInfoMessage
buildWarning (type, category, row, column) {
this.errors_.buildWarning(type, category, row, column)
} // buildWarning
buildError (type, category, row, column, content) {
this.errors_.buildError(type, category, row, column, content)
  } // buildError
buildExceptionMessage (exception, badLine) {
const errorType = CsvParseErrors.translate(exception)
// sometimes get the same error repeated
const existing = this.errors_.errors.find(e =>
e.type === errorType && e.row === exception.lines
)
if (existing) return false
this.errors_.buildError(errorType, 'structure', exception.lines, null, badLine)
return true
} // buildExceptionMessage
} // class Validator
function columnFormat (col) {
  const c = col.trim()
  if (FORMATS.numeric.test(c))
    return 'numeric'
  if (isUri(c))
    return 'uri'
  if (possibleDate(c))
    return dateFormat(c)
  return 'string'
} // columnFormat
function isUri (col) {
try {
if (FORMATS.uri.test(col)) {
const u = new URL(col)
return ['http:', 'https:'].includes(u.protocol)
}
} catch (e) {
return false
}
} // isUri
function possibleDate(col) {
return POSSIBLE_DATE_REGEXP.test(col)
} // possibleDate
function dateFormat (col) {
  // Minimal completion (assumption): return the first matching date/dateTime format name from FORMATS, or 'string' if none match.
  return (Object.entries(FORMATS).find(([name, re]) => re && name.startsWith('date') && re.test(col)) || ['string'])[0]
} // dateFormat
const MONTHNAMES = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'].join('|')
const ABBR_MONTHNAMES = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'].join('|')
const FORMATS = {
"string": null,
"numeric": /^\d*\.?\d+(?:[eE][-+]?\d+)?$/,
"uri": /^https?:/,
"date_db": /^\d{4,}-\d\d-\d\d$/,
// "12345-01-01"
"date_long": new RegExp(`^(?://|${MONTHNAMES}) [ \\d]\\d, \\d{4,}$`),
// "January 1, 12345"
"date_rfc822": new RegExp(`^[ \\d]\\d (?://|${ABBR_MONTHNAMES}) \\d{4,}$`),
// " 1 Jan 12345"
"date_short": new RegExp(`^[ \\d]\\d (?://|${ABBR_MONTHNAMES})$`),
// "1 Jan"
"dateTime_db": /^\d{4,}-\d\d-\d\d \d\d:\d\d:\d\d$/,
// "12345-01-01 00:00:00"
"dateTime_hms": /^\d\d:\d\d:\d\d$/,
// "00:00:00"
"dateTime_iso8601": /^\d{4,}-\d\d-\d\dT\d\d:\d\d:\d\dZ$/,
// "12345-01-01T00:00:00Z"
"dateTime_long": new RegExp(`^(?://|${MONTHNAMES}) \\d\\d, \\d{4,} \\d\\d:\\d\\d$`), // "January 01, 12345 00:00"
"dateTime_short": new RegExp(`^\\d\\d (?://|${ABBR_MONTHNAMES}) \\d\\d:\\d\\d$`), // "01 Jan 00:00"
"dateTime_time": /^\d\d:\d\d$/,
// "00:00"
}
const POSSIBLE_DATE_REGEXP = new RegExp(`^(\\d|\\s\\d|${ABBR_MONTHNAMES}|${MONTHNAMES})`)
const CsvParseErrors = {
INVALID_OPENING_QUOTE: 'invalidOpeningQuote',
CSV_QUOTE_NOT_CLOSED: 'unclosedQuote',
CSV_INVALID_CLOSING_QUOTE: 'trailingCharacters',
translate (exception) {
return CsvParseErrors[exception.code] || 'unknownError'
}
} // CsvParseErrors
async function * chunksToLines (chunksGen) {
let previous = ''
for await (const chunk of chunksGen) {
previous += chunk
let eolIndex
while ((eolIndex = previous.indexOf('\n')) >= 0) {
const line = previous.slice(0, eolIndex + 1)
yield line
previous = previous.slice(eolIndex + 1)
}
}
if (previous.length > 0) {
yield previous
}
} // chunksToLines
function buildDialect (...dialects) {
dialects.unshift(defaultDialect)
const mergedDialect = { }
for (const dialect of dialects) {
Object.assign(mergedDialect, dialect)
}
return mergedDialect
} // buildDialect
const textCsv = /text\/csv/
const presentOrAbsent = /header=(present|absent)/g
function crackContentType (headers) {
const [contentType] = headers['content-type']
const isTextCsv = textCsv.test(contentType)
const headerPresent = contentType.matchAll(presentOrAbsent).next().value // this is not a normal way to use an iterator
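  // A more conventional alternative (sketch): a non-global regex with String#match
  //   const m = contentType.match(/header=(present|absent)/)
  //   const headerPresent = m ? m[1] : null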
return {
contentType,
isTextCsv,
headerPresent: (headerPresent ? headerPresent[1] : null)
}
}
function getLineBreak (line) {
const eol = line.substring(line.length-2)
return (eol[0] === '\r') ? '\r\n' : '\n'
} // getLineBreak
async function validate (
source,
dialect = {},
schema = null,
options = {}
) {
const validator = new Validator(source, dialect, schema, options)
await validator.validate()
return validator
} // validate
module.exports = validate
<file_sep>const XRegExp = require('xregexp')
function escapeRegExp (string) {
return string.replace(/[.*+\-?^${}()|[\]\\]/g, '\\$&') // $& means the whole matched string
}
function all (string) {
if (!string) return string
return new RegExp(escapeRegExp(string), 'g')
} // all
class NumberFormat {
get integer () { return this.integer_ }
get pattern () { return this.pattern_ }
get prefix () { return this.prefix_ }
get numericPart () { return this.numericPart_ }
get suffix () { return this.suffix_ }
get groupingSeparator () { return this.groupingSeparator_ }
get decimalSeparator () { return this.decimalSeparator_ }
get primaryGroupingSize () { return this.primaryGroupingSize_ }
get secondaryGroupingSize () { return this.secondaryGroupingSize_ }
get fractionalGroupingSize () { return this.fractionalGroupingSize_ }
constructor (pattern = null, groupingSeparator = null, decimalSeparator = '.', integer = null) {
this.pattern_ = pattern
this.integer_ = isInteger(integer, pattern, decimalSeparator)
this.groupingSeparator_ = groupingSeparator || (this.pattern_ === null ? null : ',')
this.decimalSeparator_ = decimalSeparator || '.'
if (pattern === null) {
this.regexp_ = integer ? INTEGER_REGEXP : NUMBER_REGEXP
} else {
this.regexp_ = this.buildPatternRegex(pattern)
}
} // constructor
match (value) {
return this.regexp_.test(value)
} // match
parse (value) {
return (this.pattern_ === null)
? this.parseSimpleNumber(value)
: this.parsePattern(value)
} // parse
parseSimpleNumber (value) {
if ((this.groupingSeparator_ !== null) &&
new RegExp(`((^${escapeRegExp(this.groupingSeparator_)})|${escapeRegExp(this.groupingSeparator_)}{2})`).test(value)) {
return null
}
value = value.replace(all(this.groupingSeparator_), '')
value = value.replace(all(this.decimalSeparator_), '.')
if (!this.regexp_.test(value)) {
return null
}
switch (value) {
case 'NaN':
return Number.NaN
case 'INF':
return Number.POSITIVE_INFINITY
case '-INF':
return Number.NEGATIVE_INFINITY
default: {
switch (value[value.length - 1]) {
case '%':
return Number.parseFloat(value) / 100
case 'โฐ':
return Number.parseFloat(value) / 1000
default:
return this.integer_ ? Number.parseInt(value) : Number.parseFloat(value)
}
}
}
} // parseSimpleNumber
parsePattern (value) {
const match = XRegExp.exec(value, this.regexp_)
if (match === null) {
return null
}
let number = match.numeric_part
number = number.replace(all(this.groupingSeparator_), '')
number = number.replace(all(this.decimalSeparator_), '.')
const asFloat = Number.parseFloat(number)
number = this.integer_ ? Number.parseInt(number) : asFloat
if (match.prefix.includes('%') || match.suffix.includes('%')) {
number = asFloat / 100
}
if (match.prefix.includes('โฐ') || match.suffix.includes('โฐ')) {
number = asFloat / 1000
}
return number
} // parsePattern
extractNumberFormatParts () {
const parts = this.numericPart_.split('E')
const mantissaPart = parts[0]
const exponentPart = parts[1] || ''
const mantissaParts = mantissaPart.split(this.decimalSeparator)
// raise Csvw::NumberFormatError, "more than two decimal separators in number format" if parts.length > 2
let integerPart = mantissaParts[0]
const fractionalPart = mantissaParts[1] || ''
let signRegExp = '[-+]?'
if (['+', '-'].includes(integerPart[0])) {
signRegExp = `\\${integerPart[0]}`
integerPart = integerPart.substring(1)
}
return {
integerPart,
fractionalPart,
exponentPart,
signRegExp
}
} // extractNumberFormatParts
buildMinMaxDigits (
integerPart,
fractionalPart,
exponentPart
) {
const allGroupingSeps = all(this.groupingSeparator_)
const allHashs = all('#')
const minIntegerDigits = integerPart
.replace(allGroupingSeps, '')
.replace(allHashs, '')
.length
const minFractionDigits = fractionalPart
.replace(allGroupingSeps, '')
.replace(allHashs, '')
.length
const maxFractionDigits = fractionalPart
.replace(allGroupingSeps, '')
.length
const minExponentDigits = exponentPart
.replace(allHashs, '')
.length
const maxExponentDigits = exponentPart.length
return {
minIntegerDigits,
minFractionDigits,
maxFractionDigits,
minExponentDigits,
maxExponentDigits
}
} // buildMinMaxDigits
buildPatternRegex (pattern) {
let numericPartRegExp = `(?<numeric_part>[-+]?([0#Ee]|${escapeRegExp(this.groupingSeparator_)}|${escapeRegExp(this.decimalSeparator)})+)`
const numberFormatRegexp = XRegExp(`^(?<prefix>.*?)${numericPartRegExp}(?<suffix>.*?)$`)
const match = XRegExp.exec(pattern, numberFormatRegexp)
if (match === null) numberFormatError('invalid number format')
this.prefix_ = match.prefix
this.numericPart_ = match.numeric_part
this.suffix_ = match.suffix
const {
integerPart,
fractionalPart,
exponentPart,
signRegExp
} = this.extractNumberFormatParts()
const {
minIntegerDigits,
minFractionDigits,
maxFractionDigits,
minExponentDigits,
maxExponentDigits
} = this.buildMinMaxDigits(
integerPart,
fractionalPart,
exponentPart
)
const integerParts = integerPart
.split(this.groupingSeparator_)
.slice(1)
const integerPartsCount = integerParts.length
this.primaryGroupingSize_ = (integerParts.length > 0) ? integerParts[integerPartsCount - 1].length : 0
this.secondaryGroupingSize_ = (integerParts.length > 1) ? integerParts[integerPartsCount - 2].length : this.primaryGroupingSize_
const fractionalParts = fractionalPart.split(this.groupingSeparator_).slice(0, -1)
const fractionalPartsCount = fractionalParts.length
this.fractionalGroupingSize_ = fractionalPartsCount ? fractionalParts[0].length : 0
numericPartRegExp = signRegExp + this.buildIntegerRegex(minIntegerDigits)
numericPartRegExp += this.buildFractionalRegex(minFractionDigits, maxFractionDigits)
numericPartRegExp += this.buildExponentRegex(minExponentDigits, maxExponentDigits)
return XRegExp(`^(?<prefix>${escapeRegExp(this.prefix_)})(?<numeric_part>${numericPartRegExp})(?<suffix>${escapeRegExp(this.suffix_)})$`)
} // buildPatternRegex
buildIntegerRegex (minIntegerDigits) {
if (this.primaryGroupingSize_ === 0) {
return `[0-9]*[0-9]{${minIntegerDigits}}`
}
const leadingRegexp = `([0-9]{0,${this.secondaryGroupingSize_ - 1}}${escapeRegExp(this.groupingSeparator_)})?`
const secondaryGroups = `([0-9]{${this.secondaryGroupingSize_}}${escapeRegExp(this.groupingSeparator_)})*`
if (minIntegerDigits > this.primaryGroupingSize_) {
const remainingReqDigits = minIntegerDigits - this.primaryGroupingSize_
const reqSecondaryGroups = remainingReqDigits / this.secondaryGroupingSize_ > 0 ? `([0-9]{${this.secondaryGroupingSize_}}${escapeRegExp(this.groupingSeparator_)}){${Math.floor(remainingReqDigits / this.secondaryGroupingSize_)}}` : ''
if (remainingReqDigits % this.secondaryGroupingSize_ > 0) {
const finalReqDigits = `[0-9]{${this.secondaryGroupingSize_ - (remainingReqDigits % this.secondaryGroupingSize_)}}`
const finalOptDigits = `[0-9]{0,${this.secondaryGroupingSize_ - (remainingReqDigits % this.secondaryGroupingSize_)}}`
return `((${leadingRegexp}${secondaryGroups}${finalReqDigits})|${finalOptDigits})[0-9]{${remainingReqDigits % this.secondaryGroupingSize_}}${escapeRegExp(this.groupingSeparator_)}${reqSecondaryGroups}[0-9]{${this.primaryGroupingSize_}}`
} else {
return `(${leadingRegexp}${secondaryGroups})?${reqSecondaryGroups}[0-9]{${this.primaryGroupingSize_}}`
}
} else {
const finalReqDigits = this.primaryGroupingSize_ > minIntegerDigits ? `[0-9]{${this.primaryGroupingSize_ - minIntegerDigits}}` : ''
const finalOptDigits = this.primaryGroupingSize_ > minIntegerDigits ? `[0-9]{0,${this.primaryGroupingSize_ - minIntegerDigits}}` : ''
return `((${leadingRegexp}${secondaryGroups}${finalReqDigits})|${finalOptDigits})[0-9]{${minIntegerDigits}}`
}
} // buildIntegerRegex
buildFractionalRegex (minFractionDigits, maxFractionDigits) {
if (maxFractionDigits === 0) {
return ''
}
if (this.fractionalGroupingSize_ === 0) {
let fractionalRegExp = ''
if (minFractionDigits > 0) {
fractionalRegExp += `[0-9]{${minFractionDigits}}`
}
if (minFractionDigits !== maxFractionDigits) {
fractionalRegExp += `[0-9]{0,${maxFractionDigits - minFractionDigits}}`
}
fractionalRegExp = `${escapeRegExp(this.decimalSeparator)}${fractionalRegExp}`
if (minFractionDigits === 0) {
fractionalRegExp = `(${fractionalRegExp})?`
}
return fractionalRegExp
}
// fractionalGroupSize_ > 0
let fractionalRegExp = ''
if (minFractionDigits > 0) {
if (minFractionDigits >= this.fractionalGroupingSize_) {
// first group of required digits - something like "[0-9]{3}"
fractionalRegExp += `[0-9]{${this.fractionalGroupingSize_}}`
// additional groups of required digits - something like "(,[0-9]{3}){1}"
if (minFractionDigits / this.fractionalGroupingSize_ > 1) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{${this.fractionalGroupingSize_}}){${Math.floor(minFractionDigits / this.fractionalGroupingSize_ - 1)}}`
}
if (minFractionDigits % this.fractionalGroupingSize_ > 0) {
fractionalRegExp += `${escapeRegExp(this.groupingSeparator_)}`
}
}
// additional required digits - something like ",[0-9]{1}"
if (minFractionDigits % this.fractionalGroupingSize_ > 0) {
fractionalRegExp += `[0-9]{${minFractionDigits % this.fractionalGroupingSize_}}`
}
const optFractionalDigits = maxFractionDigits - minFractionDigits
if (optFractionalDigits > 0) {
fractionalRegExp += '('
if (minFractionDigits % this.fractionalGroupingSize_ > 0) {
// optional fractional digits to complete the group
const groupOptDigits = Math.min(optFractionalDigits, this.fractionalGroupingSize_ - (minFractionDigits % this.fractionalGroupingSize_))
fractionalRegExp += `[0-9]{0,${groupOptDigits}}`
fractionalRegExp += '|'
fractionalRegExp += `[0-9]{${groupOptDigits}}`
} else {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{1,${this.fractionalGroupingSize_}})?`
fractionalRegExp += '|'
fractionalRegExp += `${escapeRegExp(this.groupingSeparator_)}[0-9]{${this.fractionalGroupingSize_}}`
}
const remainingOptFractionalDigits = optFractionalDigits - (this.fractionalGroupingSize_ - (minFractionDigits % this.fractionalGroupingSize_))
if (remainingOptFractionalDigits > 0) {
if (remainingOptFractionalDigits % this.fractionalGroupingSize_ > 0) {
// optional fraction digits in groups
if (remainingOptFractionalDigits > this.fractionalGroupingSize_) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{${this.fractionalGroupingSize_}}){0,${Math.floor(remainingOptFractionalDigits / this.fractionalGroupingSize_)}}`
}
// remaining optional fraction digits
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{1,${remainingOptFractionalDigits % this.fractionalGroupingSize_}})?`
} else {
// optional fraction digits in groups
if (remainingOptFractionalDigits > this.fractionalGroupingSize_) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{${this.fractionalGroupingSize_}}){0,${Math.floor(remainingOptFractionalDigits / this.fractionalGroupingSize_) - 1}}`
}
// remaining optional fraction digits
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{1,${this.fractionalGroupingSize_}})?`
}
// optional fraction digits in groups
if (remainingOptFractionalDigits > this.fractionalGroupingSize_) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{${this.fractionalGroupingSize_}}){0,${Math.floor(remainingOptFractionalDigits / this.fractionalGroupingSize_) - 1}}`
}
// remaining optional fraction digits
if (remainingOptFractionalDigits % this.fractionalGroupingSize_ > 0) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{1,${remainingOptFractionalDigits % this.fractionalGroupingSize_}})?`
}
}
fractionalRegExp += ')'
}
} else if (maxFractionDigits % this.fractionalGroupingSize_ > 0) {
// optional fractional digits in groups
fractionalRegExp += `([0-9]{${this.fractionalGroupingSize_}}${escapeRegExp(this.groupingSeparator_)}){0,${Math.floor(maxFractionDigits / this.fractionalGroupingSize_)}}`
// remaining optional fraction digits
if (maxFractionDigits % this.fractionalGroupingSize_ > 0) {
fractionalRegExp += `(${escapeRegExp(this.groupingSeparator_)}[0-9]{1,${maxFractionDigits % this.fractionalGroupingSize_}})?`
}
} else {
if (maxFractionDigits > this.fractionalGroupingSize_) {
fractionalRegExp += `([0-9]{${this.fractionalGroupingSize_}}${escapeRegExp(this.groupingSeparator_)}){0,${Math.floor(maxFractionDigits / this.fractionalGroupingSize_) - 1}}`
fractionalRegExp += `[0-9]{1,${this.fractionalGroupingSize_}}`
}
}
fractionalRegExp = `${escapeRegExp(this.decimalSeparator)}${fractionalRegExp}`
if (minFractionDigits === 0) {
fractionalRegExp = `(${fractionalRegExp})?`
}
return fractionalRegExp
} // buildFractionalRegex
buildExponentRegex (minExponentDigits, maxExponentDigits) {
if (maxExponentDigits === 0) {
return ''
}
let exponentRegExp = 'E'
if (maxExponentDigits !== minExponentDigits) {
exponentRegExp += `[0-9]{0,${maxExponentDigits - minExponentDigits}}`
} // if ...
if (minExponentDigits !== 0) {
exponentRegExp += `[0-9]{${minExponentDigits}}`
}
return exponentRegExp
} // buildExponentRegex
} // class NumberFormat
function isInteger (integer, pattern, decimalSeparator) {
if (integer === null) {
return pattern ? !pattern.includes(decimalSeparator) : null
}
return integer
} // isInteger
const INTEGER_REGEXP = /^[-+]?[0-9]+[%โฐ]?$/
const NUMBER_REGEXP = /^(([-+]?[0-9]+(\.[0-9]+)?([Ee][-+]?[0-9]+)?[%โฐ]?)|NaN|INF|-INF)$/
function numberFormatError (msg = '') {
const err = new Error(msg)
err.name = 'Number Format Error'
throw err
} // numberFormatError
module.exports = (pattern, groupingSeparator, decimalSeparator, integer) =>
new NumberFormat(pattern, groupingSeparator, decimalSeparator, integer)
|
30acf40e62e7f4c721ba5fd6fd2e5663d5a50247
|
[
"JavaScript",
"Markdown"
] | 11 |
JavaScript
|
jezhiggins/csvlint.js
|
19c07eb2a171b6673958ffd503184f2705359679
|
fc3204ca1832306aded77e014a0c78cdde612850
|
refs/heads/master
|
<file_sep><!doctype html>
<html class="no-js" lang="">
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<title>SMARTY</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="apple-touch-icon" href="apple-touch-icon.png">
<link rel="icon" type"image/x-icon" href="img/house1.ico" />
<!-- Place favicon.ico in the root directory -->
<link rel="stylesheet" href="css/skeleton.css">
<link rel="stylesheet" href="css/normalize.css">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.8.3.min.js"></script>
</head>
<body>
<!--[if lt IE 8]>
<p class="browserupgrade">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
<![endif]-->
<!-- Add your site or application content here -->
<div class="home slide1 ">
<header class="head">
<div class="logo">
<img src="img/logo.png" height="100%" />
</div>
<div class="menu">
<ul> <a href="#home"><li>Home</li></a>
<a href="#about"><li>About </li></a>
<a href="#feature"><li>Features </li></a>
<a href="#work"><li>How It Works</li></a>
<a href="#contact"><li>Contact</li></a></ul>
</div>
</header>
<div class="home-content"id="home">
<div class="sm">SM</div>
<div class="house" ><img src="img/house.png" width="100px" height="110px"/></div>
<div class="rty">RTY</div>
<div class="subline">Giving Life To Home</div>
<div class="subtext">Smart Home Smart Society</div>
</div>
</div>
<div class="about" id="about">
<div class="title">ABOUT<div>♦♦♦</div></div>
      <p><b>Smarty</b> is an <b>Artificial Intelligence home</b> system which senses all the appliances and people inside the home. <b>Smarty</b> plays an important role both when the user is at home and when they are away. <b>Smarty</b> automatically senses threats and sends alerts to the user through the Smarty <b>mobile app</b> or wearable devices.</p>
<div class="info-graphic"><img src="img/Capture.png" /></div>
      <p><b>Smarty</b> helps in monitoring health, power consumption and water consumption, and predicts future usage. It also alerts us when other people enter our home without our knowledge.</p>
</div>
<div class="features-container" id="feature">
<div class="title">FEATURES <div>♦♦♦</div></div><br>
<div class="home-app features left">
<div class="feature-title ">
Home Appliances Control
</div>
<div class="desc">
          <b>Smarty</b> monitors and controls all appliances in the home, so that it can conserve more power.
</div>
</div>
<div class="resource-monitor features right">
<div class="feature-title">
Resource Monitoring
</div>
<div class="desc">
          <b>Smarty</b> keeps track of water and electricity consumption and also displays statistics when the user prompts it.
</div>
</div>
<div class="security-alert features left">
<div class="feature-title">
Disaster Alerts
</div>
<div class="desc">
          When <b>Smarty</b> predicts that a disaster is about to occur, it analyses the situation and provides a safe evacuation path and time.
</div>
</div>
<div class="disaster-alert features right">
<div class="feature-title">
Security Alerts
</div>
<div class="desc">
          <b>Smarty</b> alerts us in case of security issues such as theft, leakages, etc. <br /><br />
</div>
</div>
</div>
<div class="container work" id="work">
<div class="title">HOW IT WORKS <div>♦♦♦</div></div>
<div class="row">
<div class="columns six">
<div class="img img-left"><img src="img/login.png" /></div>
<div class="future flow-left"><div class="flow-title">Login</div>
<div class="flow-desc">User should login with either Biometric or password to control the Smarty system.</div></div>
</div>
<div class="columns six">
</div>
</div>
<div class="row">
<div class="columns six">
</div>
<div class="columns six">
<div class="future flow-right"><div class="flow-title">User Configuration</div>
<div class="flow-desc">User need to configure the system for the first time as per his/her wish to do things.</div></div>
<div class="img img-right"><img src="img/userconf.png" /></div>
</div>
</div>
<div class="row">
<div class="columns six">
<div class="img img-left"><img src="img/analytic.png" /></div>
<div class="future flow-left"><div class="flow-title">Analytics</div>
<div class="flow-desc ">Smarty system displays the statistic of each and every equipment to the user over a period of time.</div></div>
</div>
<div class="columns six">
</div>
</div>
<div class="row">
<div class="columns six">
</div>
<div class="columns six">
<div class="future flow-right"><div class="flow-title">Usability and Alerts</div>
<div class="flow-desc">User can control the System anytime using VOICE command or UI . Smarty alerts user incase any emergencies.</div></div>
<div class="img img-right"><img src="img/ua.png" /></div>
</div>
</div>
</div>
<div class="linker"><img src="img/link.png" class="link" /></div>
<div class="contact" id="contact">
<div class="title" >CONTACT<div>♦♦♦</div></div>
<div class="contact-content">
<div class="form-elem">
<form class="form" action="mailto:<EMAIL>" method="post" enctype="text/plain">
NAME:<br>
<input type="text" class="text"/><br>
E-MAIL:<br>
<input type="email" class="email"/><br>
YOUR MESSAGE:<br>
<input type="textarea" class="textarea"/><br><br>
<input type="submit" class="submit"/><br>
</form>
</div>
<div class="add">
<div class="team" >Mind Blowers</div>
<div class="event" >Design Jam </div>
<div class="company">ThoghtWorks </div>
<div class="twitter"><a href="www.twitter.com">https://twitter.com/mind.blowers.995/smarty/?href</a></div>
<div class="facebook"><a href="ww.facebook.com">https://facebook.com/mind.blowers.9/smarty?=!</a></div>
</div>
</div>
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/vendor/jquery-1.11.3.min.js"><\/script>')</script>
<script src="js/plugins.js"></script>
<script src="js/main.js"></script>
<!-- <script src="js/jquery.js"></script> -->
<!-- Google Analytics: change UA-XXXXX-X to be your site's ID. -->
<script>
(function(b,o,i,l,e,r){b.GoogleAnalyticsObject=l;b[l]||(b[l]=
function(){(b[l].q=b[l].q||[]).push(arguments)});b[l].l=+new Date;
e=o.createElement(i);r=o.getElementsByTagName(i)[0];
e.src='https://www.google-analytics.com/analytics.js';
r.parentNode.insertBefore(e,r)}(window,document,'script','ga'));
ga('create','UA-XXXXX-X','auto');ga('send','pageview');
</script>
</body>
</html>
<file_sep>function main(){
$('a[href*=#]:not([href=#])').click(function() {
if (location.pathname.replace(/^\//,'') == this.pathname.replace(/^\//,'') && location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name=' + this.hash.slice(1) +']');
if (target.length) {
$('html,body').animate({
scrollTop: target.offset().top - topBarHeight // + 1 for firefox bug
}, 1000);
return false;
}
}
  });
  var winHeight = $(window).height();
  var topBarHeight = $('.head').height();
  $(".home-content").css("height", winHeight - topBarHeight);
  function slide1() {
    // removeClass/addClass take class names without the leading dot
    $('.home').removeClass('slide2');
    $('.home').addClass('slide1');
    $('.subline').hide(100, function () {
      $('.subtext').show(100);
    });
  }
  function slide2() {
    $('.home').removeClass('slide1');
    $('.home').addClass('slide2');
    $('.subtext').hide(100, function () {
      $('.subline').show(100);
    });
  }
  var count = 0;
setInterval(function(){
if(count%2==0){
slide2();
count++;
}else {
slide1();
count++;
}
},2000);
}
$(document).ready(main);
|
82bf9f45b1ddb72fa280bc6626ccb73378ecb7f3
|
[
"JavaScript",
"HTML"
] | 2 |
HTML
|
subatheesh-rts/Design-Jam---Smarty
|
66f45c9a15eba36b5d8e9e71e798066088ed508f
|
a4f2b9a5ab6851dba59061afa2ed5c27f7a04e0d
|
refs/heads/main
|
<repo_name>st234pa/i-am-an-artiste<file_sep>/README.md
# i-am-an-artiste
beep boop i'm an impressionist painter
## How to use
### Get the code
Clone or download (and unzip) this project.
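For example, with git (assuming the repository lives at the GitHub path shown for this project):
```
git clone https://github.com/st234pa/i-am-an-artiste.git
```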
### Add input images
Add your image (has to be in `.png` format) to the `Input` sub-folder.
### Modify `main()`
In the `a9_main.cpp` file, change the line
```
testOriented("./Input/saena_color.png", "./Output/saena_color_painted.png", 25, 50000, 0.3f);
```
to call the function on your input image.
In this line you must specify the name of your input file and the name of your output file. You can also adjust the brush stroke size, the number of brush strokes, and the amount of noise (random color variation) if you so choose.
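For example, a hypothetical call for an input image named `my_photo.png`, with a smaller brush, more strokes, and slightly less noise, might be:
```
testOriented("./Input/my_photo.png", "./Output/my_photo_painted.png", 15, 80000, 0.2f);
```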
Additionally, you can customize the type of brush stroke by changing the line
```
Image texture("./Input/brush.png");
```
to:
```
Image texture("./Input/longBrush.png");
```
or
```
Image texture("./Input/longBrush2.png");
```
### Run code
To run the code, first open your terminal and then navigate to the project's root directory.
```
cd <path where you stored this project>/i-am-an-artiste
```
Finally run the command below and wait.
```
make run
```
Et voilร !
<file_sep>/a9.cpp
#include <iostream>
#include "a9.h"
using namespace std;
// 2) Paintbrush splatting
void brush(Image &out, int x, int y, Vec3f color, const Image &texture)
{
for (int c = 0; c < texture.channels(); c++)
{
for (int j = 0; j < texture.height(); j++)
{
for (int i = 0; i < texture.width(); i++)
{
int xi = x + i - texture.width() / 2;
int yj = y + j - texture.height() / 2;
if (xi >= 0 && xi < out.width() && yj >= 0 && yj < out.height())
{
out(xi, yj, c) = texture(i, j, c) * color(c) + (1.0f - texture(i, j, c)) * out(xi, yj, c);
}
}
}
}
}
// 3.1-2) Painterly rendering - single scale
void singleScalePaint(const Image &im, Image &out, const Image &importance, const Image &texture, int size, int N, float noise)
{
float scale = (float)size / (float)std::max(texture.width(), texture.height());
Image scaledTexture = scaleNN(texture, scale);
std::random_device rt;
std::mt19937 mtX(rt());
std::uniform_int_distribution<int> xDistribution(0, im.width() - 1);
std::mt19937 mtY(rt());
std::uniform_int_distribution<int> yDistribution(0, im.height() - 1);
std::mt19937 mtR(rt());
std::uniform_real_distribution<float> importanceDistribution(0.0f, 1.0f);
std::mt19937 mtN(rt());
std::uniform_real_distribution<float> noiseDistribution(0.0f, 1.0f);
int numStrokes = 0;
while (numStrokes < N)
{
int y = yDistribution(mtY);
int x = xDistribution(mtX);
bool xBound = x >= scaledTexture.width() / 2 && x < out.width() - scaledTexture.width() / 2 - 1;
bool yBound = y >= scaledTexture.height() / 2 && y < out.height() - scaledTexture.height() / 2 - 1;
if (xBound && yBound)
{
float i = importanceDistribution(mtR);
if (i <= importance(x, y))
{
numStrokes++;
Vec3f color;
color(0) = im(x, y, 0) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
color(1) = im(x, y, 1) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
color(2) = im(x, y, 2) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
brush(out, x, y, color, scaledTexture);
}
}
}
}
// 3.3) Painterly rendering - two scale
Image sharpnessMap(const Image &im, float sigma)
{
Image L(im.width(), im.height(), im.channels());
for (int y = 0; y < im.height(); y++)
{
for (int x = 0; x < im.width(); x++)
{
float w = im(x, y, 0) * 0.3f + im(x, y, 1) * 0.6f + im(x, y, 2) * 0.1f;
L(x, y, 0) = w;
L(x, y, 1) = w;
L(x, y, 2) = w;
}
}
Image blur = gaussianBlur_separable(L, sigma);
Image high = L - blur;
Image energy = high * high;
Image sharpness = gaussianBlur_separable(energy, 4.0f * sigma);
sharpness = sharpness / sharpness.max();
return sharpness;
}
Image painterly(const Image &im, const Image &texture, int N, int size, float noise)
{
Image out(im.width(), im.height(), im.channels());
Image constantImportance(im.width(), im.height(), im.channels());
for (int x = 0; x < constantImportance.number_of_elements(); x++)
{
constantImportance(x) = 1.0f;
}
singleScalePaint(im, out, constantImportance, texture, size, N, noise);
Image importance = sharpnessMap(im);
singleScalePaint(im, out, importance, texture, size / 4.0f, N, noise);
return out;
}
// 4.1) Orientation extraction
Image getBlurredLumi(const Image &im, float sigmaG)
{
Image lumi = lumiChromi(im)[0];
return gaussianBlur_separable(lumi, sigmaG);
}
Image computeTensor(const Image &im, float sigmaG, float factorSigma)
{
Image blurredLumi = getBlurredLumi(im, sigmaG);
Image Ix = gradientX(blurredLumi);
Image Iy = gradientY(blurredLumi);
Image M(im.width(), im.height(), 3);
for (int y = 0; y < im.height(); y++)
{
for (int x = 0; x < im.width(); x++)
{
M(x, y, 0) = Ix(x, y) * Ix(x, y);
M(x, y, 1) = Ix(x, y) * Iy(x, y);
M(x, y, 2) = Iy(x, y) * Iy(x, y);
}
}
return gaussianBlur_separable(M, sigmaG * factorSigma);
}
Matrix tensorImageToMatrix(const Image &tensor, int x, int y)
{
Matrix ret(2, 2);
ret << tensor(x, y, 0), tensor(x, y, 1), tensor(x, y, 1), tensor(x, y, 2);
return ret;
}
float horizontalAngle(Vec2f v)
{
float angle = std::atan2(sin(v.y()), cos(v.x()));
angle = std::fmod(angle, M_2_PI);
return angle;
}
Vec2f smallestEigenvector(Matrix eigenVectors)
{
Vec2f ret;
float smallestNorm;
for (int c = 0; c < eigenVectors.cols(); c++)
{
Vec2f eigenVector = eigenVectors.col(c);
float eigenvectorNorm = eigenVector.norm();
if (c == 0 || eigenvectorNorm < smallestNorm)
{
smallestNorm = eigenvectorNorm;
ret = eigenVector;
}
}
return ret;
}
Image computeAngles(const Image &im)
{
Image ret(im.width(), im.height(), im.channels());
Image tensorImage = computeTensor(im);
Vec2f horizontalVector;
for (int y = 0; y < im.height(); y++)
{
for (int x = 0; x < im.width(); x++)
{
Matrix structureTensor = tensorImageToMatrix(tensorImage, x, y);
Eigen::EigenSolver<Matrix> solver(structureTensor);
Matrix eigenVectors = solver.eigenvectors().real();
Vec2f smallest = smallestEigenvector(eigenVectors);
float angle = horizontalAngle(smallest);
for (int c = 0; c < im.channels(); c++)
{
ret(x, y, c) = angle / M_2_PI;
}
}
}
return ret;
}
// 4.2) Oriented painterly rendering - single scale
void singleScaleOrientedPaint(const Image &im, Image &out, const Image &thetas, const Image &importance, const Image &texture, int size, int N, float noise, int nAngles)
{
float scale = (float)size / (float)std::max(texture.width(), texture.height());
Image scaledTexture = scaleNN(texture, scale);
std::random_device rt;
std::mt19937 mtX(rt());
std::uniform_int_distribution<int> xDistribution(0, im.width() - 1);
std::mt19937 mtY(rt());
std::uniform_int_distribution<int> yDistribution(0, im.height() - 1);
std::mt19937 mtR(rt());
std::uniform_real_distribution<float> importanceDistribution(0.0f, 1.0f);
std::mt19937 mtN(rt());
std::uniform_real_distribution<float> noiseDistribution(0.0f, 1.0f);
int numStrokes = 0;
while (numStrokes < N)
{
int y = yDistribution(mtY);
int x = xDistribution(mtX);
bool xBound = x >= scaledTexture.width() / 2 && x < out.width() - scaledTexture.width() / 2 - 1;
bool yBound = y >= scaledTexture.height() / 2 && y < out.height() - scaledTexture.height() / 2 - 1;
if (xBound && yBound)
{
float i = importanceDistribution(mtR);
if (i <= importance(x, y))
{
numStrokes++;
Vec3f color;
color(0) = im(x, y, 0) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
color(1) = im(x, y, 1) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
color(2) = im(x, y, 2) * (1.0f - noise / 2.0f + noise * noiseDistribution(mtN));
float theta = thetas(x, y);
Image rotatedTexture = rotate(scaledTexture, theta * M_2_PI);
brush(out, x, y, color, rotatedTexture);
}
}
}
}
// 4.3) Oriented painterly rendering - two scale
Image orientedPaint(const Image &im, const Image &texture, int size, int N, float noise)
{
Image out(im.width(), im.height(), im.channels());
Image constantImportance(im.width(), im.height(), im.channels());
for (int x = 0; x < constantImportance.number_of_elements(); x++)
{
constantImportance(x) = 1.0f;
}
Image thetas = computeAngles(im);
singleScaleOrientedPaint(im, out, thetas, constantImportance, texture, size, N, noise);
Image importance = sharpnessMap(im);
singleScaleOrientedPaint(im, out, thetas, importance, texture, size / 4.0f, N, noise);
return out;
}
<file_sep>/a9.h
#ifndef A9_H_PHUDVTKB
#define A9_H_PHUDVTKB
#include "Image.h"
#include "basicImageManipulation.h"
#include "filtering.h"
#include "matrix.h"
#include <random>
// Write your declarations here, or extend the Makefile if you add source
// files
// 2) Paintbrush splatting
void brush(Image &out, int y, int x, Vec3f color, const Image &texture);
// 3.1-2) Painterly rendering - single scale
void singleScalePaint(const Image &im, Image &out, const Image &importance, const Image &texture, int size = 10, int N = 1000, float noise = 0.3f);
// 3.3) Painterly rendering - two scale
Image painterly(const Image &im, const Image &texture, int N = 10000, int size = 50, float noise = 0.3f);
// 4.1) Orientation extraction
Image computeAngles(const Image &im);
// 4.2) Oriented painterly rendering - single scale
void singleScaleOrientedPaint(const Image &im, Image &out, const Image &thetas, const Image &importance, const Image &texture, int size, int N, float noise, int nAngles = 36);
// 4.3) Oriented painterly rendering - two scale
Image orientedPaint(const Image &im, const Image &texture, int size = 50, int N = 7000, float noise = 0.3f);
// Helper functions
Image sharpnessMap(const Image &im, float sigma = 1.0f);
Image getBlurredLumi(const Image &im, float sigmaG);
Image computeTensor(const Image &im, float sigmaG = 3.0f, float factorSigma = 5.0f);
Matrix tensorImageToMatrix(const Image &tensor, int x, int y);
float horizontalAngle(Vec2f v);
Vec2f smallestEigenvector(Matrix eigenVectors);
#endif /* end of include guard: A9_H_PHUDVTKB */
<file_sep>/a9_main.cpp
#include <iostream>
#include "a9.h"
using namespace std;
void testOriented(string in, string out, int size = 50, int N = 7000, float noise = 0.3f)
{
Image texture("./Input/longBrush2.png");
Image oriented = orientedPaint(Image(in), texture, size, N, noise);
oriented.write(out);
}
int main()
{
// name of input file, name of output file, brush stroke size (default is 50), number of strokes (default is 7000), amount of random color variation (default is 0.3f)
testOriented("./Input/saena_color.png", "./Output/saena_color_painted.png", 25, 50000, 0.3f);
return EXIT_SUCCESS;
}
|
ec912a57fa7208bbe6f4ca9a6375f5f04659f8b5
|
[
"Markdown",
"C",
"C++"
] | 4 |
Markdown
|
st234pa/i-am-an-artiste
|
2c6ff5e2fcdbfedeb8f64d1e126fa77f4b0776ba
|
e648d1721072edb6a510330a8b74c9e767fe5f40
|
refs/heads/master
|
<file_sep>// USERNAME: krishankantray
// INSTITUTION: Galgotias University
#include <iostream>
#include <cstdio>
#include <vector>
#include <set>
#include <climits>
#include <map>
#include <algorithm>
#include <cmath>
#include <utility>
using namespace std;
#define ll long long int
#define pb push_back
#define mp make_pair
#define ull unsigned long long int
bool check(int arr[],int n,int m, int mid)
{
int ctr=1,temp=0;
	for (int i = 0; i < n; ++i)
	{
		if(arr[i]>mid)	// no single element may exceed the candidate limit
			return false;
		if(temp+arr[i]>mid)
		{
			ctr++;
			if(ctr>m)
				return false;
			temp=arr[i];
		}
		else
			temp+=arr[i];
	}
return true;
}
int solve(int arr[], int n, int m)
{
int mid;
int left=0;
int right=0;
for(int i=0; i<n; ++i) right+=arr[i];
int ans=INT_MAX;
while(left <= right)
{
mid=(right+left)/2;
if(check(arr,n,m,mid))
{
ans=min(ans,mid);
right=mid-1;
}
else
left=mid+1;
}
return ans;
}
int main()
{
#ifndef ONLINE_JUDGE
freopen("input.txt","r",stdin);
freopen("output.txt","w",stdout);
#endif
int n,m;
cin>>n;
int arr[n];
for(int i=0; i<n; i++) cin>>arr[i];
sort(arr,arr+n);
cin>>m;
cout<<solve(arr,n,m);
return 0;
}
<file_sep># Binary-Search
Interesting Binary Search problems.
<file_sep> // https://www.codechef.com/MARCH18B/problems/MINEAT/
#include <iostream>
#include <set>
#include <map>
#include <algorithm>
#include <climits>
using namespace std;
int main()
{
#ifndef ONLINE_JUDGE
freopen("inp.txt","r",stdin);
freopen("out.txt", "w", stdout);
#endif
int t;
cin>>t;
while(t--)
{
int n;
cin>>n;
int h;
cin>>h;
int arr[n];
for(int i=0; i<n; i++)
cin>>arr[i];
//int j=*min_element(arr,arr+n);
int lm=*max_element(arr,arr+n);
int mn=INT_MAX;
int left=1, right=lm, mid=(left+right)/2;
while(left<=right)
{
int sum=0,k=0;
mid=(left+right)/2;
for(; k<n; k++)
{
sum+=(arr[k]/mid + ((arr[k]%mid)?1:0));
if(sum>h)
{
break;
}
}
if(sum>h)
{
left=mid+1;
}
else
{
right=mid-1;
}
if(k==n)
mn=min(mid,mn);
//cout<<left<<right<<" ";
}
		cout<<mn<<endl;
}
return 0;
}
|
920e801d11c9848ee76bd70704e7ec6d90030e76
|
[
"Markdown",
"C++"
] | 3 |
C++
|
krishankantray/Binary-Search
|
18189eff55423db31f9ad4dfb28f93d95712e856
|
ee8197f4d82fe956dcc33717b8b94698483c86ff
|
refs/heads/main
|
<file_sep># learn-data-structure-by-c
learn-data-structure-by-c
## Reference source material
- Data_Structure_Src/: Yoon Sung-woo (2012), *Yoon Sung-woo's Data Structures*, Orange Media
<file_sep>#!/bin/zsh
# Convert source files compiled on Windows (Windows Korean encoding: cp949) -> Mac source files (Mac Korean encoding: utf-8)
# iconv -t utf8 -f cp949 <source_win.c> source_mac.c
# -t utf8 may be omitted
FROM=cp949
TO=utf8
ICONV="iconv -t "${TO}" -f "${FROM}" "
ย
# Loop over every source file and write a re-encoded copy of it
#FILE_LIST=`find "${PWD}" -type f -name "*.[c|h]"`
#for file in "${FILE_LIST}"
for file in `find "${PWD}" -type f -name "*.[c|h]"`
do
    echo "${file}"
    `cp "${file}" "${file}".bak`
    `iconv -t utf8 -f cp949 <"${file}".bak> "${file}"`
    `rm "${file}".bak`
#    "${ICONV}" <"${file}"> "${file}"
done
#find . -type f -name "*.c" | while read file;
#do
#ย ย ย ย `cp "${file}" "${file}".bak`
#ย ย ย ย `echo "${ICONV}" "${file}"`
#ย ย ย ย `"${ICONV}" <"${file}".bak> "${file}"`
#ย ย ย ย `rm "${file}".bak`
#done
## Things I started writing myself and then abandoned
# Takes the OS code, file type, source file path, and target file path when changing a file's encoding.
#if [ "${#}" -ne 4 ];
#then
#	echo "This script takes exactly 4 parameters."
#	echo "Please pass 1. OS code (win/mac), 2. file type (.c, .js ..), 3. source file path, 4. target file path."
# exit 0
#fi
#
#ICONV_CD="${1}"
#FILE_TYPE="${2}"
#SOURCE_FILE="${3}"/**/*."${FILE_TYPE}"
#TARGET_FILE="${4}"
#
#for file in "${SOURCE_FILE}"
#do
# echo "${file}"
#done
#
#
#echo "${ICONV_CD}"
#echo "${FILE_TYPE}"
##echo "${SOURCE_FILE}"u
# Linux iconv command syntax, applied below
#FROM=euc-kr
#TO=utf-8
#ICONV="iconv -c -f $FROM -t $TO "
#ย
#find ./ -type f -name "*.c" | while read fn; do
#ย ย ย ย cp ${fn} ${fn}.bak
#ย ย ย ย echo $ICONV ${fn}
#ย ย ย ย $ICONV < ${fn}.bak > ${fn}
#ย ย ย ย rm ${fn}.bak
#done
<file_sep>#include <stdio.h>
int LSearch(int ar[], int len, int target)
{
int i;
for(i=0; i<len; i++)
{
if(ar[i]==target)
			return i;    // found: return the index where target is stored
|
a7211ad0a327d79d0ca09f05498d0724b3f080d2
|
[
"Markdown",
"C",
"Shell"
] | 3 |
Markdown
|
kwonhoe1121/learn-data-structure-by-c
|
6f1025cf4ffd5edd4890238290b3b35bad863942
|
95ef9442f049edf6f3589df667d201b00200e4b6
|
refs/heads/master
|
<file_sep>import axios from "axios";
import React, { useState } from "react";
export default function App() {
return (
<div>
<Reddit subreddit="/r/programming/" />
</div>
)
}
function Reddit(props) {
const [posts, setPosts] = useState([]);
const [subredditList, setSubredditList] = useState([]);
const [subreddit, setSubreddit] = useState(props.subreddit);
const getPosts = (subr) => {
setPosts([]);
axios.get(`https://www.reddit.com${subr ? subr.slice(0, -1) : subreddit.slice(0, -1)}.json`)
.then(res => {
const newPosts = res.data.data.children
.map(obj => obj.data);
setPosts(newPosts);
});
};
React.useEffect(() => {
axios.get(`https://www.reddit.com/subreddits.json`)
.then(res => {
const subReddits = res.data.data.children
.map(obj => obj.data.url);
setSubredditList(subReddits);
getPosts();
});
}, []);
return (
<div>
<h1>{subreddit}</h1>
<select onChange={(val) => { console.log(val.target.value); setSubreddit(val.target.value); getPosts(val.target.value); }}>
<option value={props.subreddit}>{props.subreddit}</option>
{subredditList.map(sub => (
            <option key={sub} value={sub}>{sub}</option>
))}
</select>
<ul>
{posts.map(post => (
<li key={post.id}>
{post.title}
</li>
))}
</ul>
</div>
);
}
|
10a6c78c62b856e1848803abdb149a29ea4cb675
|
[
"JavaScript"
] | 1 |
JavaScript
|
Kamori/React-recent-reddit-posts
|
011a51d15f693c79f59b81b0077b4d3246e59713
|
b64579ecb7e42639af27811f4fbc9ecc6fe3ec93
|
refs/heads/master
|
<file_sep>#!/usr/bin/env bash
function id_install {
local nm_os="${DISTRIB_ID}_$(uname -r | cut -d- -f1 | cut -d. -f1)"
echo "$nm_os/$PKG_HOME/$PKGSRC_BRANCH" | tr / _ | tr . _ | perl -pe 's{__+}{_}'
}
function _pkgsrc_profile {
local shome="${_pkgsrc_home:="$(cd -P -- "$(dirname -- "$BASH_SOURCE")/.." && pwd -P)"}"
if [[ -z "${PKG_HOME:-}" ]]; then
export PKG_HOME="${BOARD_PATH:-$HOME}"
fi
case "${DISTRIB_ID}" in
Darwin)
PATH="${BOARD_PATH}/homebrew/opt/coreutils/libexec/gnubin:$shome/bin:$PKG_HOME/install/sbin:$PKG_HOME/install/bin:$shome/exec:$PATH"
;;
*)
PATH="$PKG_HOME/install/gnu/bin:$shome/bin:$PKG_HOME/install/sbin:$PKG_HOME/install/bin:$shome/exec:$PATH"
;;
esac
export PKGSRC_BRANCH='pkgsrc-current'
export PKGSRC_BRANCH_DIR='current'
export PKG_CONFIG_PATH="${PKG_HOME}/install/lib/pkgconfig"
local pth_cache="${DATA}/cache"
export ID_INSTALL="$(id_install)"
export DISTDIR="$pth_cache/distfiles"
export PACKAGES="$pth_cache/packages/${ID_INSTALL}"
export WRKOBJDIR="${TMP:-${TMPDIR:-/tmp}}/pkgsrc_${ID_INSTALL}"
export PKG_OPT="env MAKEFLAGS= MFLAGS= DISTDIR=$DISTDIR WRKOBJDIR=$WRKOBJDIR PACKAGES=$PACKAGES PKG_RCD_SCRIPTS=no SKIP_LICENSE_CHECK=yes"
export BMAKE="$PKG_OPT bmake"
export SH="$(which bash)"
export PKGSRCDIR="${BOARD_PATH}/${PKGSRC_BRANCH}"
export PKG_REPOS="file://$PACKAGES/All"
export PKG_BUILD_SOURCE=1
}
_pkgsrc_profile
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
local nm_pkg
for nm_pkg in "$@"; do
case "$nm_pkg" in
bootstrap)
env PKG_TARGET=package block compile bmake
;;
*)
env PKG_TARGET=package block compile pkgsrc "$nm_pkg"
;;
esac
done
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
local nm_pkg
for nm_pkg in "$@"; do
case "$nm_pkg" in
bootstrap)
if [[ ! -d "$PKG_HOME" ]]; then
sudo install -d -o $(id -un) -g $(id -gn) -m 755 "$PKG_HOME"
fi
tar xvfz "$PACKAGES/bootstrap.tgz" -C /
;;
*)
PKG_TARGET=install block compile pkgsrc "$nm_pkg"
;;
esac
done
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
pkgin -p ls | cut -d';' -f1 | perl -pe 's{-[^-\s]+$}{}' | sort -u
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function install_pkg {
pkgin -y install "$1"
}
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
local nm_pkg="$1"; shift
local nm_build="${1:-$nm_pkg}"
case "$nm_build" in
bootstrap)
if ! pkg install "$nm_build"; then
if [[ -z "${PKG_BUILD_SOURCE:-}" ]]; then
return 1
fi
pkg package "$nm_build"
fi
pkg_add -f "$(set +f; ls -trhd "${PACKAGES}/All/"pkgin-*.tgz 2>/dev/null | tail -1)" 2>/dev/null || true
;;
*)
pkg update cache
install_pkg just-importing-summary >/dev/null 2>&1 || true
if ! type -P pkgin >/dev/null || ! install_pkg "${nm_pkg#*/}"; then
if [[ -z "${PKG_BUILD_SOURCE:-}" ]]; then
return 1
fi
if type -P pkgin >/dev/null; then
if ! install_pkg "${nm_pkg#*/}"; then
pkg package "$nm_build"
pkg update cache
install_pkg "${nm_pkg#*/}"
fi
else
pkg bin-install "$nm_build"
fi
fi
;;
esac
pkg update cache
}
source sub "$0" "$@"
<file_sep>---
date: 2016-03-08T21:07:13+01:00
title: pkgsrc block
type: index
weight: 0
---
- https://acadix.biz/pkgsrc.php
<file_sep>#!/usr/bin/env bash
umask 022
export PATH="$PKG_HOME/install/sbin:$PKG_HOME/install/bin:/usr/bin:/usr/sbin:/bin:/sbin:$PATH"
unset CFLAGS CXXFLAGS CPPFLAGS FC FFLAGS LD LDFLAGS LD_LIBRARY_PATH
export CC=cc
export CXX=c++
export CPP="$(which cpp)"
export PKGSRC_USE_FORTIFY=no
export LANG=C # osx has some sed byte issue
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
for a in bmake gtar pkgin lintpkgsrc; do
if [[ ! -x "$PKG_HOME/install/bin/$a" ]]; then
return 1
fi
done
return 0
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
local nm_pkg="$1"; shift
pkg clean "$nm_pkg"
PKG_TARGET=update block compile pkgsrc "$nm_pkg"
PKG_TARGET=install block compile pkgsrc "$nm_pkg"
pkg ensure "$nm_pkg"
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local phome="$1"; shift
local pth_cache="$1"; shift
local pth_env="$1"; shift
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
export CC="$(basename "$(which clang gcc | head -1)")"
mkdir -p "$PACKAGES" "$WRKOBJDIR" "$DISTDIR"
rm -rf $WRKOBJDIR/bootstrap $PACKAGES/bootstrap.tgz
(cd $PKGSRCDIR/bootstrap && \
$PKG_OPT ./bootstrap --prefix=$PKG_HOME/install --unprivileged --abi 64 \
--prefer-pkgsrc=yes --ignore-user-check -j4 --compiler=$CC \
--mk-fragment=$HOME/etc/mk.conf \
--workdir $WRKOBJDIR/bootstrap \
$(if [[ "${PKG_TARGET:-}" == "package" ]]; then echo --gzip-binary-kit $PACKAGES/bootstrap.tgz; fi) \
)
}
source sub "$BASH_SOURCE" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
source normalize
mkdir -p "$(dirname "${DISTDIR}")" || sudo install -d -o $(id -un) -g $(id -gn) "$(dirname "${DISTDIR}")"
mkdir -p "$DISTDIR" "$PACKAGES"
if [[ "$#" == 0 ]]; then
set -- "${PACKAGES}/All"
fi
while [[ "$#" -gt 0 ]]; do
export PACKAGES="${1%%/All}"
export PKG_REPOS="file://${1}"
cd "$1"; shift
if type -P pkg_info >/dev/null; then
(set +f; pkg_info -X *.tgz) | gzip -9 > pkg_summary.gz.$$
mv -f pkg_summary.gz.$$ pkg_summary.gz
fi
if type -P pkgin >/dev/null; then
pkgin update
fi
done
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local phome="$1"; shift
local pth_cache="$1"; shift
local pth_env="$1"; shift
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
#export CC="$(basename "$(which clang gcc | head -1)")"
local nm_pkg
for nm_pkg in "$@"; do
if pushd "$PKGSRCDIR/$nm_pkg" 2>/dev/null >/dev/null; then
$BMAKE BIN_INSTALL_FLAGS=-U PKG_BEST_EXISTS=true "${PKG_TARGET:-install}"
popd >/dev/null
fi
done
}
source sub "$BASH_SOURCE" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
local nm_pkg
for nm_pkg in "$@"; do
(set +f; ls -d "$PKGSRCDIR"/*/$nm_pkg 2>/dev/null || true)
done | sed "s#$shome##" | cut -d/ -f4- | sort
}
source sub "$0" "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
source normalize
if [[ -z "${PKG_HOME:-}" ]]; then
return 0
fi
mkdir -p "${DATA}/cache/git"
if [[ ! -d "$PKGSRCDIR" ]]; then
pushd "${BOARD_PATH}"
if [[ -f "${DATA}/cache/git/${PKGSRC_BRANCH}.tar.gz" ]]; then
tar xfz "${DATA}/cache/git/${PKGSRC_BRANCH}.tar.gz"
else
pkg update
pkg update save
fi
popd
fi
install -d "$PKG_HOME"
pkg ensure bootstrap
mkdir -p "$PKG_HOME/install/include"
ln -nfs gettext/libintl.h "$PKG_HOME/install/include/"
for nm_pkg in devel/ncurses devel/ncursesw devel/gettext-tools devel/gettext textproc/openjade devel/pcre2; do
if ! pkg ensure "$nm_pkg"; then
case "${DISTRIB_ID}" in
Darwin)
set +f
pushd "$WRKOBJDIR/$nm_pkg"/work/*/
set -f
env CWRAPPERS_CONFIG_DIR=$(pwd)/../.cwrapper/config bmake || env CWRAPPERS_CONFIG_DIR=$(pwd)/../.cwrapper/config make
popd
pkg ensure "$nm_pkg"
;;
*)
touch "$WRKOBJDIR/devel/gettext/work/.destdir/home/ubuntu/install/lib/preloadable_libintl.so" # alpine
pkg ensure "$nm_pkg"
;;
esac
fi
done
pkg ensure pkgtools/pkgin
pkg ensure pkgtools/pkg_chk
pkg ensure pkgtools/lintpkgsrc
pkgin update
pkg ensure security/mozilla-rootcerts
mozilla-rootcerts install || true
pushd $PKG_HOME/install/etc/openssl/certs
mozilla-rootcerts extract
mozilla-rootcerts rehash
popd
pkg ensure misc/getopt
pkg ensure www/curl
pkg ensure devel/gmake
pkg ensure devel/gettext-tools
pkg ensure archivers/gtar
pkg ensure sysutils/findutils
pkg ensure sysutils/coreutils
}
main "$@"
<file_sep>#!/usr/bin/env bash
function main {
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
source "$shome/script/profile-reset"
rm -f pkgsrc.tar.gz
wget http://cdn.netbsd.org/pub/pkgsrc/${PKGSRC_BRANCH_DIR}/pkgsrc.tar.gz
mv pkgsrc.tar.gz "${PKGSRC_BRANCH}.tar.gz"
rm -rf "${PKGSRC_BRANCH}"
tar xfz "${PKGSRC_BRANCH}.tar.gz"
rm -f "${PKGSRC_BRANCH}.tar.gz"
mv pkgsrc "$PKGSRC_BRANCH"
$PKG_OPT pkg_chk -g
}
source sub "$0" "$@"
|
2c6a5a304efa3d77a7ae96e699591870847b31e3
|
[
"Markdown",
"Shell"
] | 15 |
Shell
|
imma/pkgsrc
|
a63907291e4c97945605981195130bb1d4e4fe49
|
c99d923dbcd792d80b91479ccdabcf3340a0acb1
|
refs/heads/master
|
<repo_name>ericswetts/W7D3<file_sep>/jBuilder/app/views/api/guests/_guest.json.jbuilder
json.extract! guest, :name, :favorite_color, :age
<file_sep>/jBuilder/app/views/api/gifts/index.json.jbuilder
json.array! @gifts do |one_gift|
json.partial! 'api/gifts/gift', gift: one_gift
end<file_sep>/jBuilder/app/views/api/guests/index.json.jbuilder
json.array! @guests do |one_guest|
json.partial! 'api/guests/guest', guest: one_guest
end<file_sep>/jBuilder/app/views/api/parties/index.json.jbuilder
json.array! @parties do |one_party|
json.partial! 'api/parties/party', party: one_party
end<file_sep>/jBuilder/app/views/api/parties/show.json.jbuilder
json.name @party.name
json.guests @party.guests do |guest|
json.name guest.name
json.gifts guest.gifts, partial: 'api/gifts/gift', as: :gift
end
|
d85b531b295681f9f9e21e7ca32d454fc920121b
|
[
"Ruby"
] | 5 |
Ruby
|
ericswetts/W7D3
|
f64ddc4213d06c8c34eafaa90d11d01f32ed1b87
|
d59972a520521b1c2e2f4e92806da1a1b321c5b9
|
refs/heads/master
|
<repo_name>wayne90040/Django-AllForOne<file_sep>/crawlerAPI/views/view_gasPrice.py
from django.http import JsonResponse
import json, requests
url = "https://www.cpc.com.tw/GetOilPriceJson.aspx?type=TodayOilPriceString"
re = requests.get(url, verify=False)
js = json.loads(re.content)
def post(request):
result = {'result': 0}
body = json.loads(request.body.decode('utf-8'))
type = body['Type']
try:
if type == 'All':
result['result'] = 1
result['Unleaded'] = float(js['sPrice1'])
result['Super'] = float(js['sPrice2'])
result['Supreme'] = float(js['sPrice3'])
result['AlcoholGas'] = float(js['sPrice4'])
result['Diesel'] = float(js['sPrice5'])
result['LiquefiedGas'] = float(js['sPrice6'])
elif type == 'Unleaded':
result['result'] = 1
result['Unleaded'] = float(js['sPrice1'])
elif type == 'Super':
result['result'] = 1
result['Super'] = float(js['sPrice2'])
elif type == 'Supreme':
result['result'] = 1
result['Supreme'] = float(js['sPrice3'])
elif type == 'AlcoholGas':
result['result'] = 1
result['AlcoholGas'] = float(js['sPrice4'])
elif type == 'Diesel':
result['result'] = 1
result['Diesel'] = float(js['sPrice5'])
elif type == 'LiquefiedGas':
result['result'] = 1
result['LiquefiedGas'] = float(js['sPrice6'])
except Exception as e:
result['Exception'] = str(e)
return JsonResponse(result)
<file_sep>/requirements.txt
asgiref==3.2.10
certifi==2020.6.20
chardet==3.0.4
dj-database-url==0.5.0
dj-static==0.0.6
Django==3.0.8
gunicorn==20.0.4
heroku==0.1.4
idna==2.10
python-dateutil==1.5
pytz==2020.1
requests==2.24.0
sqlparse==0.3.1
static3==0.7.0
urllib3==1.25.9
psycopg2-binary==2.8.2<file_sep>/README.md
## AllForOne_Django
#### Heroku-Django Practice
<details>
<summary> Introduction </summary>
- Backend for the AllForOne API
- Tools
- Pycharm
- Django 3.0.8
- Note
- [How to use Heroku](#heroku)
- [How to use AllForOne API](#api)
- Reference
- https://djangogirlstaipei.herokuapp.com/tutorials/deploy-to-heroku/?os=windows
</details>
<a name="heroku"></a>
## How to use Heroku
### Create requirements.txt
> pip freeze > requirements.txt
### Create Procfile
#### Web:
> gunicorn --pythonpath siteName siteName.wsgi
#### xxx.py:
> xxx: python xxx.py
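A complete `Procfile` combining both entries might look like the sketch below (`xxx` is just a placeholder process name for any extra script):
```
web: gunicorn --pythonpath siteName siteName.wsgi
xxx: python xxx.py
```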
### Create production_settings.py for Heroku
* Path:
> siteName/siteName/production_settings.py
* Code:
```python
# Import all default settings.
from .settings import *
import dj_database_url
DATABASES = {
'default': dj_database_url.config(),
}
# Static asset configuration.
STATIC_ROOT = 'staticfiles'
# Honor the 'X-Forwarded-Proto' header for request.is_secure().
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers.
ALLOWED_HOSTS = ['*']
# Turn off DEBUG mode.
DEBUG = False
```
### wsgi.py
* Path:
> siteName/siteName/wsgi.py
```python
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "siteName.settings")
application = Cling(get_wsgi_application())
```
<a name="api"></a>
## How to use AllForOne API
> https://allforone-back.herokuapp.com/
| Url | Memo |
|:---:|:---:|
| AQI/ | Air quality |
| WARN/ | Weather warnings |
| GasPrice/ | Fuel prices |
| Bike/ | Ubike stations |
| CloseBike/ | Nearest Ubike station |
| Weather/ | Current weather |
| PreWeather/ | Temperature for the next 36 hours |
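Each endpoint expects an HTTP POST whose body is a JSON object, as shown in the request examples under API Detail below. As a quick sanity check, a minimal sketch using Python's `requests` package against the GasPrice endpoint (base URL and body taken from this README) could look like:
```python
import requests

# Hypothetical smoke test: ask the GasPrice endpoint for every fuel type.
resp = requests.post(
    "https://allforone-back.herokuapp.com/GasPrice/",
    json={"Type": "All"},
)
print(resp.json())  # e.g. {"result": 1, "Unleaded": ..., "Super": ..., ...}
```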
### API Detail
<details>
<summary> AQI/(็ฉบๆฐฃๅ่ณช) </summary>
* request:
```json
{
"Longitude": 120.000,
"Latitude": 20.000
}
```
* response:
```json
{
"result": 1,
"SiteName": "",
"County": "",
"AQI": "",
"Pollutant": "",
"AQIStatus": "",
"PM10": "",
"PM25": "",
"WindSpeed": "",
"WindDir": "",
"PM10Avg": "",
"PM25Avg": "",
"Date": "",
"Time": "",
"So2": "",
"Co": "",
"O3": "",
"So2Avg": "",
"PM25Status": ""
}
```
</details>
<details>
<summary> WARN/(่ญฆๅ ฑ) </summary>
* request:
```json
{
"City": "้ซ้ๅธ"
}
```
* response:
```json
{
"result": 1,
"locationName": "",
"hazardConditions": ""
}
```
</details>
<details>
<summary> GasPrice/(ๆฒนๅน) </summary>
* request:
* "All" - ๅๅณๅ
จ้จ
```json
{
"Type": "Unleaded"
}
```
* response:
```json
{
"result": 1,
"Unleaded": 22.4
}
```
</details>
<details>
<summary> Bike/(All Ubike) </summary>
* request:
```json
{
"City": "Taipei"
}
```
* response:
```json
{
"result": 1,
"type": "ๅ
จ้จ็ซ้ป",
"bikes": [
{
"StationUID": "",
"StationID": "",
"StationName_zh": "",
"StationLatitude": 24.97848,
"StationLongitude": 121.55545,
"stationAddress_zh": "",
"BikesCapacity": 26,
"ServiceAvailable": 1,
"AvailableRentBikes": 15,
"AvailableReturnBikes": 11,
"UpdateTime": "",
"haversine": ""
}
]
}
```
</details>
<details>
<summary> CloseBike/(ๆ่ฟBike) </summary>
* request:
```json
{
"Longitude": 120.000,
"Latitude": 20.000,
"City": "Taipei",
"Type": 1
}
```
* response:
```json
{
"result": 1,
"type": "่ชๅทฑๆ่ฟ็ซ้ป",
"bikes": [
{
"StationUID": "",
"StationID": "",
"StationName_zh": "",
"StationLatitude": 24.97848,
"StationLongitude": 121.55545,
"stationAddress_zh": "",
"BikesCapacity": 26,
"ServiceAvailable": 1,
"AvailableRentBikes": 15,
"AvailableReturnBikes": 11,
"UpdateTime": "",
"haversine": ""
}
]
}
```
</details>
<details>
<summary> Weather/(ๆฐฃๆบซ) </summary>
* request:
```json
{
"Longitude": 120.000,
"Latitude": 20.000
}
```
* response:
```json
{
"result": 1,
"locationName": "ๆๆฅ",
"WDIR": "130",
"WDSD": "1.10",
"TEMP": "26.40",
"HUMD": "0.99",
"RAINFALL": "0",
"H_UVI": "0",
"D_TX": "26.60",
"D_TXT": "0020",
"D_TN": "26.30",
"D_TNT": "0008"
}
```
</details>
<details>
<summary> PreWeather/(่ฟ36ๅฐๆๆฐฃๆบซ) </summary>
* request:
```json
{
"City": "Taipei"
}
```
* response:
```json
{
"result": 0,
"locationName": "่บๅๅธ",
"weatherElement": [
{
"elementName": "Wx",
"time": [
{
"startTime": "2020-08-16T00:00:00+08:00",
"endTime": "2020-08-16T06:00:00+08:00",
"parameter": {
"parameterName": "ๅค้ฒ",
"parameterValue": "4"
}
},
{
"startTime": "2020-08-16T06:00:00+08:00",
"endTime": "2020-08-16T18:00:00+08:00",
"parameter": {
"parameterName": "ๆดๅๅพ็ญๆซ้ท้ฃ้จ",
"parameterValue": "21"
}
},
{
"startTime": "2020-08-16T18:00:00+08:00",
"endTime": "2020-08-17T06:00:00+08:00",
"parameter": {
"parameterName": "ๆดๆๅค้ฒ",
"parameterValue": "2"
}
}
]
}
]
}
```
</details>
<file_sep>/crawlerAPI/views/view_airQuality.py
from django.http import JsonResponse
import json, requests
url = "https://opendata.epa.gov.tw/webapi/Data/REWIQA/?$orderby=SiteName&$skip=0&$top=1000&format=json"
re = requests.get(url, verify=False)
js_list = json.loads(re.content)
def post(request):
result = {'result': 0}
body = json.loads(request.body.decode(encoding='utf-8'))
user_lon = body['Longitude']
user_lat = body['Latitude']
hy_list = []
try:
for js in js_list:
hy = (float(js['Longitude']) - float(user_lon)) ** 2 + (float(js['Latitude']) - float(user_lat)) ** 2
hy_list.append(hy)
min_hy = min(hy_list)
for js in js_list:
if min_hy == (float(js['Longitude']) - float(user_lon)) ** 2 + (
float(js['Latitude']) - float(user_lat)) ** 2:
result['result'] = 1
result['SiteName'] = js['SiteName']
result['County'] = js['County']
result['AQI'] = js['AQI']
result['Pollutant'] = js['Pollutant']
result['AQIStatus'] = js['Status']
result['PM10'] = js['PM10']
result['PM25'] = js['PM2.5']
result['WindSpeed'] = js['WindSpeed']
result['WindDir'] = js['WindDirec']
result['PM10Avg'] = js['PM10_AVG']
result['PM25Avg'] = js['PM2.5_AVG']
result['Date'] = js['PublishTime'].split(' ')[0]
result['Time'] = js['PublishTime'].split(' ')[1]
result['So2'] = js['SO2']
result['Co'] = js['CO']
result['O3'] = js['O3']
result['So2Avg'] = js['SO2_AVG']
result['PM25Status'] = 'test'
break
except Exception as e:
result['Exception'] = str(e)
return JsonResponse(result)
<file_sep>/crawlerAPI/views/view_bikeRent.py
from django.http import JsonResponse
from datetime import datetime, timedelta
from wsgiref.handlers import format_date_time
from math import radians, cos, sin, asin, sqrt
from time import mktime
from hashlib import sha1
import json, requests
import hmac
import base64
app_id = 'ea0b964c043b4c19a7e8cb52511842be'
app_key = '3MjiMorUMhlsPR0SbjDCRGS06_s'
class Auth():
def __init__(self, app_id, app_key):
self.app_id = app_id
self.app_key = app_key
def get_auth_header(self):
xdate = format_date_time(mktime(datetime.now().timetuple()))
hashed = hmac.new(self.app_key.encode('utf8'), ('x-date: ' + xdate).encode('utf8'), sha1)
signature = base64.b64encode(hashed.digest()).decode()
authorization = 'hmac username="' + self.app_id + '", ' + \
'algorithm="hmac-sha1", ' + \
'headers="x-date", ' + \
'signature="' + signature + '"'
return {
'Authorization': authorization,
'x-date': format_date_time(mktime(datetime.now().timetuple())),
'Accept - Encoding': 'gzip'
}
class Bike():
def __init__(self, city):
self.city = city
def get_bike(self):
auth = Auth(app_id, app_key)
id_urls = {"Taipei": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Taipei?$format=JSON",
"NewTaipei": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/NewTaipei?$format=JSON",
"Hsinchu": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Hsinchu?$format=JSON",
"MiaoliCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/MiaoliCounty?$format=JSON",
"ChanghuaCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/ChanghuaCounty?$format=JSON",
"PingtungCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/PingtungCounty?$format=JSON",
"Taoyuan": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Taoyuan?$format=JSON",
"Kaohsiung": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Kaohsiung?$format=JSON",
"Tainan": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Tainan?$format=JSON",
"Taichung": "https://ptx.transportdata.tw/MOTC/v2/Bike/Station/Taichung?$format=JSON"}
bike_urls = {"Taipei": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Taipei?$format=JSON",
"NewTaipei": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/NewTaipei?$format=JSON",
"Hsinchu": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Hsinchu?$format=JSON",
"MiaoliCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/MiaoliCounty?$format=JSON",
"ChanghuaCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/ChanghuaCounty?$format=JSON",
"PingtungCounty": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/PingtungCounty?$format=JSON",
"Taoyuan": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Taoyuan?$format=JSON",
"Kaohsiung": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Kaohsiung?$format=JSON",
"Tainan": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Tainan?$format=JSON",
"Taichung": "https://ptx.transportdata.tw/MOTC/v2/Bike/Availability/Taichung?$format=JSON"}
id_url = id_urls[self.city]
bike_url = bike_urls[self.city]
id_re = requests.get(id_url, headers=auth.get_auth_header(), verify=False)
bike_re = requests.get(bike_url, headers=auth.get_auth_header(), verify=False)
id_js = json.loads(id_re.content)
bike_js = json.loads(bike_re.content)
bike_list = []
for id_ in id_js:
for bike in bike_js:
if id_['StationUID'] == bike['StationUID']:
stationUID = id_['StationUID']
stationID = id_['StationID']
stationName_zh = id_['StationName']['Zh_tw']
# stationName_en = id_['StationName']['En']
stationLatitude = id_['StationPosition']['PositionLat']
stationLongitude = id_['StationPosition']['PositionLon']
stationAddress_zh = id_['StationAddress']['Zh_tw']
# stationAddress_en = a['StationAddress']['En']
bikesCapacity = id_['BikesCapacity']
                    servieAvailable = bike['ServiceAvailable']  # service status: [0: 'suspended', 1: 'in service']
                    availableRentBikes = bike['AvailableRentBikes']  # number of bikes available to rent
                    availableReturnBikes = bike['AvailableReturnBikes']  # number of free docks for returns
updateTime = id_['UpdateTime']
bikedic = {'StationUID': stationUID, 'StationID': stationID, 'StationName_zh': stationName_zh,
'StationLatitude': stationLatitude,
'StationLongitude': stationLongitude, 'stationAddress_zh': stationAddress_zh,
'BikesCapacity': bikesCapacity,
'ServiceAvailable': servieAvailable, 'AvailableRentBikes': availableRentBikes,
'AvailableReturnBikes': availableReturnBikes,
'UpdateTime': updateTime}
bike_list.append(bikedic)
return bike_list
def haversine(lon1, lat1, lon2, lat2):  # lon1, lat1, lon2, lat2 given in decimal degrees
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    d_lon = lon2 - lon1
    d_lat = lat2 - lat1
    a = sin(d_lat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(d_lon / 2) ** 2
    c = 2 * asin(sqrt(a))
    r = 6371  # mean Earth radius in kilometres
    return c * r * 1000  # distance in metres
def get_all_bike(request):
result = {'result': 0}
body = json.loads(request.body.decode(encoding="utf-8"))
body_city = body['City']
    city_bikes = []
    try:
        city_bikes = Bike(body_city).get_bike()
    except Exception as e:
        result['Exception'] = str(e)
    if len(city_bikes) == 0:
        result['type'] = 'all stations'
        result['bikes'] = {'error': 'Connection error, please try again later'}
    else:
        result['result'] = 1
        result['type'] = 'all stations'
        result['bikes'] = city_bikes
    return JsonResponse(result)
def get_close_bike(request):
result = {'result': 0}
body = json.loads(request.body.decode(encoding="utf-8"))
body_lon = body["Longitude"]
body_lat = body["Latitude"]
body_city = body["City"]
body_type = body["Type"]
haver_list = []
city_bikes = Bike(body_city).get_bike()
# Get Min haversine
for city_bike in city_bikes:
haver = haversine(body_lon, body_lat, float(city_bike['StationLongitude']), float(city_bike['StationLatitude']))
haver_list.append(haver)
haver_min = min(haver_list)
for city_bike in city_bikes:
if body_type == 1:
try:
if haver_min == haversine(body_lon, body_lat, float(city_bike['StationLongitude']), float(city_bike['StationLatitude'])):
result['result'] = 1
                    result['type'] = "nearest station"
city_bike['haversine'] = str(haver_min).split(".")[0]
result['bikes'] = [city_bike]
break
except Exception as e:
result['Exception'] = str(e)
if result['result'] == 0:
        result['error'] = 'Server error, please try again later'
return JsonResponse(result)
<file_sep>/crawlerAPI/views/view_weather.py
from django.http import JsonResponse
import json, requests
url = "https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/O-A0003-001?Authorization=rdec-key-123-45678-011121314&format=JSON"
re = requests.get(url, verify=False)
js = json.loads(re.content)
weather_datas = js['cwbopendata']['location']
def post(request):
result = {'result': 0}
body = json.loads(request.body.decode(encoding='utf-8'))
body_lon = body['Longitude']
body_lat = body['Latitude']
hypotenuse_list = []
try:
for weather_data in weather_datas:
hypotenuse = (float(weather_data['lon']) - float(body_lon)) ** 2 + (float(weather_data['lat']) - float(body_lat)) ** 2
hypotenuse_list.append(hypotenuse)
min_hpy = min(hypotenuse_list)
for weather_data in weather_datas:
if min_hpy == (float(weather_data['lon']) - float(body_lon)) ** 2 + (float(weather_data['lat']) - float(body_lat)) ** 2:
result['result'] = 1
result['locationName'] = weather_data['locationName']
weatherElements = weather_data['weatherElement']
for weatherElement in weatherElements:
                    # wind direction
if weatherElement["elementName"] == "WDIR":
result['WDIR'] = weatherElement['elementValue']['value']
                    # wind speed
elif weatherElement["elementName"] == "WDSD":
result['WDSD'] = weatherElement['elementValue']['value']
                    # temperature
elif weatherElement["elementName"] == "TEMP":
result['TEMP'] = weatherElement['elementValue']['value']
                    # relative humidity
elif weatherElement["elementName"] == "HUMD":
result['HUMD'] = weatherElement['elementValue']['value']
                    # accumulated rainfall over the last 24 hours
elif weatherElement["elementName"] == "24R":
result['RAINFALL'] = weatherElement['elementValue']['value']
                    # hourly UV index
elif weatherElement["elementName"] == "H_UVI":
result['H_UVI'] = weatherElement['elementValue']['value']
                    # daily maximum temperature
elif weatherElement["elementName"] == "D_TX":
result['D_TX'] = weatherElement['elementValue']['value']
                    # time the daily maximum temperature occurred
elif weatherElement["elementName"] == "D_TXT":
result['D_TXT'] = weatherElement['elementValue']['value']
                    # daily minimum temperature
elif weatherElement["elementName"] == "D_TN":
result['D_TN'] = weatherElement['elementValue']['value']
                    # time the daily minimum temperature occurred
elif weatherElement["elementName"] == "D_TNT":
result['D_TNT'] = weatherElement['elementValue']['value']
break
except Exception as e:
result['Exception'] = str(e)
return JsonResponse(result)
<file_sep>/crawlerAPI/views/view_warning.py
from django.http import JsonResponse
import json, requests
url = "https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/W-C0033-001?Authorization=CWB-242E2AA6-F542-43E1-973D-9A0A4DBB7E5E&downloadType=WEB&format=JSON"
re = requests.get(url, verify=False)
js_list = json.loads(re.content)['cwbopendata']["dataset"]["location"]
def post(request):
result = {'result': 0}
body = json.loads(request.body)
body_city = body['City']
try:
for js in js_list:
if body_city in js['locationName']:
result['result'] = 1
result['locationName'] = js['locationName']
hazardConditions = js['hazardConditions']
                if isinstance(hazardConditions, dict):  # a warning is currently in effect
                    result['hazardConditions'] = hazardConditions['hazards']['info']['phenomena']
else:
result['hazardConditions'] = "None"
break
except Exception as e:
result['Exception'] = str(e)
return JsonResponse(result)
<file_sep>/crawlerAPI/views/view_preweather.py
from django.http import JsonResponse
import json, requests
url = "https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-C0032-001?Authorization=rdec-key-123-45678-011121314&format=JSON"
re = requests.get(url, verify=False)
js = json.loads(re.content)
preweather_datas = js['cwbopendata']["dataset"]["location"]
def formatCityName(cityName):
result = ""
if cityName == "Taipei":
result = "่บๅๅธ"
return result
def post(request):
result = {'result': 0}
body = json.loads(request.body.decode(encoding='utf-8'))
body_city = body['City']
try:
for preweather_data in preweather_datas:
if preweather_data['locationName'] == formatCityName(body_city):
result['locationName'] = preweather_data['locationName']
result['weatherElement'] = preweather_data['weatherElement']
except Exception as e:
result['Exception'] = str(e)
return JsonResponse(result)
|
43191a7260a12fbd2e432910e107cd87e7ca0392
|
[
"Markdown",
"Python",
"Text"
] | 8 |
Python
|
wayne90040/Django-AllForOne
|
b86228391fd8b47b55c6cc28e5cfffc88f90deef
|
b6ca26f34a6321508a39f7c5b4abf88d17a06dbe
|
refs/heads/master
|
<file_sep>import glob
import numpy as np
import pandas as pd
from skimage.io import imread
from sklearn.preprocessing import LabelEncoder
from skimage.color import gray2rgb, rgb2gray
from skimage.color.adapt_rgb import adapt_rgb, each_channel
from skimage import filters
from skimage import exposure
import skimage.transform
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
def img_draw(im_arr, im_names, n_imgs):
plt.figure(1)
n_rows = int(np.sqrt(n_imgs))
n_cols = n_imgs / n_rows
for img_i in range(n_imgs):
plt.subplot(n_cols, n_rows, img_i + 1)
plt.title(im_names[img_i].split('/')[-1].split('.')[0])
if len(im_arr.shape) == 4:
img = im_arr[img_i]
else:
img = im_arr[img_i]
plt.imshow(img)
plt.show()
def img_draw_test(im_arr, im_names, n_imgs):
plt.figure(1)
n_rows = int(np.sqrt(n_imgs))
n_cols = n_imgs / n_rows
for img_i in range(n_imgs):
plt.subplot(n_cols, n_rows, img_i + 1)
plt.title(im_names[img_i])
if len(im_arr.shape) == 4:
img = im_arr[img_i]
else:
img = im_arr[img_i]
plt.imshow(img)
plt.show()
def imp_img(img_name):
# read
img = imread(img_name)
# if gray convert to color
if len(img.shape) == 2:
img = gray2rgb(img)
return img
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def rescale_intensity_each(image, low, high):
plow, phigh = np.percentile(img_file, (low, high))
return np.clip(exposure.rescale_intensity(image, in_range=(plow, phigh)), 0, 1)
"""
Vars
"""
submit_name = 'ImageDataGenerator_tst.csv'
debug = False
n_fold = 2
debug_n = 100
"""
Import images
"""
img_size = 30
# Train
path = "data"
train_names = sorted(glob.glob(path + "/trainResized/*"))
train_labels_index = pd.DataFrame.from_csv('trainLabels.csv')
train_files = np.zeros((len(train_names), img_size, img_size, 3)).astype('float32')
train_labels = np.zeros((len(train_names),)).astype(str)
for i, name_file in enumerate(train_names):
image = imp_img(name_file)
train_files[i, :, :, :] = image
train_labels[i] = train_labels_index.loc[int(name_file.split('.')[0].split('/')[-1])]['Class']
# Test
test_names = sorted(glob.glob(path + "/testResized/*"))
test_files = np.zeros((len(test_names), img_size, img_size, 3)).astype('float32')
for i, name_file in enumerate(test_names):
image = imp_img(name_file)
test_files[i, :, :, :] = image
train_files /= 255
test_files /= 255
label_encoder = LabelEncoder()
train_labels = label_encoder.fit_transform(train_labels)
print(train_files.shape, test_files.shape)
print(np.unique(train_labels))
"""
Image processing
"""
if debug:
img_draw(train_files, train_names, debug_n)
# Contrast stretching
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = rescale_intensity_each(img_file, 20, 80)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = rescale_intensity_each(img_file, 20, 80)
if debug:
img_draw(train_files, train_names, debug_n)
# Find borders
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = sobel_each(img_file)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = sobel_each(img_file)
# Contrast stretching
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = rescale_intensity_each(img_file, 5, 95)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = rescale_intensity_each(img_file, 5, 95)
if debug:
img_draw(train_files, train_names, debug_n)
train_files_gray = np.zeros((len(train_names), img_size, img_size)).astype('float32')
test_files_gray = np.zeros((len(test_names), img_size, img_size)).astype('float32')
# Change to gray
for i, img_file in enumerate(train_files):
train_files_gray[i, :, :] = rgb2gray(img_file)
for i, img_file in enumerate(test_files):
test_files_gray[i, :, :] = rgb2gray(img_file)
if debug:
img_draw(train_files_gray, train_names, debug_n)
# Contrast stretching
for i, img_file in enumerate(train_files_gray):
p0, p100 = np.percentile(img_file, (0, 100))
train_files_gray[i, :, :] = exposure.rescale_intensity(img_file, in_range=(p0, p100))
for i, img_file in enumerate(test_files_gray):
p0, p100 = np.percentile(img_file, (0, 100))
test_files_gray[i, :, :] = exposure.rescale_intensity(img_file, in_range=(p0, p100))
if debug:
img_draw(train_files_gray, train_names, debug_n)
"""
Configure train/test
"""
np.random.seed(2016)
i_part = 1.0 / n_fold
batch_size = 128
nb_classes = 62
nb_epoch = 20
np.random.seed(7)
cv_prob = np.random.sample(train_files.shape[0])
# input image dimensions
img_rows, img_cols = img_size, img_size
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# convert class vectors to binary class matrices
train_labels_dummy = np_utils.to_categorical(train_labels, nb_classes)
test_results = []
acc = []
for i_fold in range(n_fold):
test_cv_ind = np.logical_and(i_fold * i_part <= cv_prob, (i_fold + 1) * i_part > cv_prob)
train_cv_ind = np.logical_not(np.logical_and(i_fold * i_part <= cv_prob, (i_fold + 1) * i_part > cv_prob))
X_train, y_train = train_files_gray[train_cv_ind, :, :], train_labels[train_cv_ind]
X_test, y_test = train_files_gray[test_cv_ind, :, :], train_labels[test_cv_ind]
"""
Compile Model
"""
# the data, shuffled and split between train and test sets
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
np.random.seed(1007) # for reproducibility
model = Sequential()
"""
inner layers start
"""
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
"""
inner layers stop
"""
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
sgd = SGD(lr=0.03, decay=1e-5, momentum=0.7, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.reset_states()
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1,
              validation_data=(X_test, Y_test))
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch, show_accuracy=True, verbose=1,
validation_data=(X_test, Y_test))
"""
Get accuracy
"""
score = model.evaluate(X_test, Y_test, verbose=0, show_accuracy=True)
print('Test score:', score[0])
print('Test accuracy:', score[1])
acc.append(score[1])
predicted_results = model.predict_classes(X_test, batch_size=batch_size, verbose=1)
print(label_encoder.inverse_transform(predicted_results))
print(label_encoder.inverse_transform(y_test))
unsuccesful_predict = np.logical_not(predicted_results == y_test)
img_draw_test(X_test[unsuccesful_predict, 0, :, :], label_encoder.inverse_transform(y_test[unsuccesful_predict]),
debug_n)
if n_fold > 1:
print('The accuracy is %.3f' % np.mean(acc))
"""
Solve and submit test
"""
np.random.seed(1007) # for reproducibility
model.compile(loss='categorical_crossentropy', optimizer=sgd)
# the data, shuffled and split between train and test sets
train_files_gray = train_files_gray.reshape(train_files.shape[0], 1, img_rows, img_cols)
test_files_gray = test_files_gray.reshape(test_files.shape[0], 1, img_rows, img_cols)
# Fit the whole train data
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_files_gray)
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(train_files_gray, train_labels_dummy, batch_size=batch_size),
samples_per_epoch=train_files.shape[0], nb_epoch=int(nb_epoch * 1.2), show_accuracy=True, verbose=1)
predicted_results = model.predict_classes(test_files_gray, batch_size=batch_size, verbose=1)
predicted_results = label_encoder.inverse_transform(predicted_results)
test_index = []
for file_name in test_names:
test_index.append(int(file_name.split('.')[0].split('/')[-1]))
sub_file = pd.DataFrame.from_csv('sampleSubmission.csv')
sub_file.Class = predicted_results
sub_file.index = test_index
sub_file.index.name = 'ID'
sub_file.to_csv(submit_name)
# each_border -> rgb2gray: 0.7018
# each_rescale_intensity -> each_border -> rgb2gray: 0.7081
# each_rescale_intensity -> each_border -> each_rescale_intensity -> rgb2gray:
# Epoch 15, val_loss: 1.2972 - val_acc: 0.7069
# each_rescale_intensity -> each_border -> rgb2gray -> rescale_intensity: Epoch 12, val_loss: 1.2152 - val_acc: 0.7037
# each_equalize_hist -> each_border -> rgb2gray -> equalize_hist: Epoch 16, val_loss: 1.2984 - val_acc: 0.6846
# each_rescale_intensity -> each_border -> rgb2gray -> rescale_intensity, sobel(10, 90):
# Epoch 18, val_loss: 1.2860 - val_acc: 0.7094
<file_sep>"""
Import images
"""
import glob
import numpy as np
from skimage.io import imread
from skimage.color import gray2rgb, rgb2gray
from skimage.transform import resize
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
from skimage import exposure
import matplotlib.pyplot as plt
def img_draw(im_list, im_names):
plt.figure(1)
n_rows = int(np.sqrt(len(im_list)))
n_cols = len(im_list) / n_rows
for img_i in range(len(im_list)):
plt.subplot(n_cols, n_rows, img_i + 1)
plt.title(im_names[img_i].split('/')[-1].split('.')[0])
plt.imshow(im_list[img_i])
plt.show()
def imp_img(img_name):
# read
img = imread(img_name)
# if gray convert to color
if len(img.shape) == 2:
img = gray2rgb(img)
return img
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
return filters.sobel(image)
n_images = 64
img_size = 30
path = "data"
train_names = sorted(glob.glob(path + "/train/*"))
train_files = []
for i, name_file in enumerate(train_names):
train_files.append(imp_img(name_file))
train_names = train_names[:n_images]
train_files = train_files[:n_images]
# Resize
for i, img_file in enumerate(train_files):
train_files[i] = resize(img_file, (img_size, img_size))
img_draw(train_files, train_names)
# Find borders
for i, img_file in enumerate(train_files):
train_files[i] = sobel_each(img_file)
img_draw(train_files, train_names)
# Change to gray
for i, img_file in enumerate(train_files):
train_files[i] = rgb2gray(img_file)
img_draw(train_files, train_names)
# Contrast stretching
for i, img_file in enumerate(train_files):
p2, p98 = np.percentile(img_file, (2, 98))
train_files[i] = exposure.rescale_intensity(img_file, in_range=(p2, p98))
img_draw(train_files, train_names)
<file_sep>import glob
from skimage.transform import resize
from skimage.io import imread, imsave
import os
# Set path of data files
path = "data"
if not os.path.exists(path + "/trainResized64"):
os.makedirs(path + "/trainResized64")
if not os.path.exists(path + "/testResized64"):
os.makedirs(path + "/testResized64")
img_size = 64
trainFiles = glob.glob(path + "/train/*")
for i, nameFile in enumerate(trainFiles):
image = imread(nameFile)
imageResized = resize(image, (img_size, img_size))
newName = "/".join(nameFile.split("/")[:-1]) + "Resized64/" + nameFile.split("/")[-1]
imsave(newName, imageResized)
testFiles = glob.glob(path + "/test/*")
for i, nameFile in enumerate(testFiles):
image = imread(nameFile)
imageResized = resize(image, (img_size, img_size))
newName = "/".join(nameFile.split("/")[:-1]) + "Resized64/" + nameFile.split("/")[-1]
imsave(newName, imageResized)
<file_sep>import glob
import numpy as np
import pandas as pd
from skimage.io import imread
from sklearn.preprocessing import LabelEncoder
from skimage.color import gray2rgb, rgb2gray
from skimage.color.adapt_rgb import adapt_rgb, each_channel
from skimage import filters
from skimage import exposure
import skimage.transform as tf
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
def img_draw(im_arr, im_names, n_imgs):
plt.figure(1)
n_rows = int(np.sqrt(n_imgs))
n_cols = n_imgs / n_rows
for img_i in range(n_imgs):
plt.subplot(n_cols, n_rows, img_i + 1)
plt.title(im_names[img_i].split('/')[-1].split('.')[0])
if len(im_arr.shape) == 4:
img = im_arr[img_i]
else:
img = im_arr[img_i]
plt.imshow(img)
plt.show()
def img_rescale(img, scale):
original_y, original_x = img.shape
if scale > 1:
img = tf.rescale(img, scale, clip=True)
scaled_y, scaled_x = img.shape
dx = (scaled_x - original_x) // 2
dy = (scaled_y - original_y) // 2
img = img[dy: (dy + original_y), dx: (dx + original_x)]
return img
else:
tmp_img = np.zeros(img.shape)
img = tf.rescale(img, scale)
scaled_y, scaled_x = img.shape
tmp_img[((original_y - scaled_y) // 2):((original_y - scaled_y) // 2 + scaled_y),
((original_x - scaled_x) // 2):((original_x - scaled_x) // 2 + scaled_x)] = img
return tmp_img
def img_updown(img, up):
h = img.shape[0]
up_pixels = int(h * up)
tmp_img = np.zeros(img.shape)
if up_pixels > 0:
tmp_img[up_pixels:, :] = img[: - up_pixels, :]
else:
if up_pixels < 0:
tmp_img[: up_pixels, :] = img[-up_pixels:, :]
else:
tmp_img = img
return tmp_img
def img_leftright(img, right):
w = img.shape[1]
right_pixels = int(w * right)
tmp_img = np.zeros(img.shape)
if right_pixels > 0:
tmp_img[:, right_pixels:] = img[:, : (-1 * right_pixels)]
else:
if right_pixels < 0:
tmp_img[:, : right_pixels] = img[:, (-1 * right_pixels):]
else:
tmp_img = img
return tmp_img
def img_rotate(img, rotate, corner_deg_chance):
rot_chance = np.random.random()
if rot_chance < corner_deg_chance:
return tf.rotate(img, 90)
if corner_deg_chance <= rot_chance < (corner_deg_chance * 2):
return tf.rotate(img, 180)
if (corner_deg_chance * 2) <= rot_chance < (corner_deg_chance * 3):
return tf.rotate(img, 270)
return tf.rotate(img, rotate)
def img_draw_test(im_arr, im_names, n_imgs):
plt.figure(1)
n_rows = int(np.sqrt(n_imgs))
n_cols = n_imgs / n_rows
for img_i in range(n_imgs):
plt.subplot(n_cols, n_rows, img_i + 1)
plt.title(im_names[img_i])
if len(im_arr.shape) == 4:
img = im_arr[img_i]
else:
img = im_arr[img_i]
plt.imshow(img)
plt.show()
def imp_img(img_name):
# read
img = imread(img_name)
# if gray convert to color
if len(img.shape) == 2:
img = gray2rgb(img)
return img
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def rescale_intensity_each(image, low, high):
plow, phigh = np.percentile(img_file, (low, high))
return np.clip(exposure.rescale_intensity(image, in_range=(plow, phigh)), 0, 1)
"""
Vars
"""
submit_name = 'ImageDataGenerator_128.csv'
debug = False
n_fold = 2
debug_n = 100
train_times = 5
"""
Import images
"""
img_size = 30
# Train
path = "data"
train_names = sorted(glob.glob(path + "/trainResized/*"))
train_labels_index = pd.DataFrame.from_csv('trainLabels.csv')
train_files = np.zeros((len(train_names), img_size, img_size, 3)).astype('float32')
train_labels = np.zeros((len(train_names),)).astype(str)
for i, name_file in enumerate(train_names):
image = imp_img(name_file)
train_files[i, :, :, :] = image
train_labels[i] = train_labels_index.loc[int(name_file.split('.')[0].split('/')[-1])]['Class']
# Test
test_names = sorted(glob.glob(path + "/testResized/*"))
test_files = np.zeros((len(test_names), img_size, img_size, 3)).astype('float32')
for i, name_file in enumerate(test_names):
image = imp_img(name_file)
test_files[i, :, :, :] = image
train_files /= 255
test_files /= 255
label_encoder = LabelEncoder()
train_labels = label_encoder.fit_transform(train_labels)
print(train_files.shape, test_files.shape)
print(np.unique(train_labels))
"""
Image processing
"""
if debug:
img_draw(train_files, train_names, debug_n)
# Contrast stretching
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = rescale_intensity_each(img_file, 20, 80)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = rescale_intensity_each(img_file, 20, 80)
if debug:
img_draw(train_files, train_names, debug_n)
# Find borders
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = sobel_each(img_file)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = sobel_each(img_file)
# Contrast stretching
for i, img_file in enumerate(train_files):
train_files[i, :, :, :] = rescale_intensity_each(img_file, 5, 95)
for i, img_file in enumerate(test_files):
test_files[i, :, :, :] = rescale_intensity_each(img_file, 5, 95)
if debug:
img_draw(train_files, train_names, debug_n)
train_files_gray = np.zeros((len(train_names), img_size, img_size)).astype('float32')
test_files_gray = np.zeros((len(test_names), img_size, img_size)).astype('float32')
# Change to gray
for i, img_file in enumerate(train_files):
train_files_gray[i, :, :] = rgb2gray(img_file)
for i, img_file in enumerate(test_files):
test_files_gray[i, :, :] = rgb2gray(img_file)
if debug:
img_draw(train_files_gray, train_names, debug_n)
# Contrast stretching
for i, img_file in enumerate(train_files_gray):
p0, p100 = np.percentile(img_file, (0, 100))
train_files_gray[i, :, :] = exposure.rescale_intensity(img_file, in_range=(p0, p100))
for i, img_file in enumerate(test_files_gray):
p0, p100 = np.percentile(img_file, (0, 100))
test_files_gray[i, :, :] = exposure.rescale_intensity(img_file, in_range=(p0, p100))
if debug:
img_draw(train_files_gray, train_names, debug_n)
"""
Configure train/test
"""
np.random.seed(2016)
i_part = 1.0 / n_fold
batch_size = 128
nb_classes = 62
nb_epoch = 120
np.random.seed(7)
cv_prob = np.random.sample(train_files_gray.shape[0])
# input image dimensions
img_rows, img_cols = img_size, img_size
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# lr
lr_updates = {0: 0.03, 40: 0.01, 80: 0.003}
# convert class vectors to binary class matrices
train_labels_dummy = np_utils.to_categorical(train_labels, nb_classes)
test_results = []
acc = []
for i_fold in range(n_fold):
test_cv_ind = np.logical_and(i_fold * i_part <= cv_prob, (i_fold + 1) * i_part > cv_prob)
train_cv_ind = np.logical_not(np.logical_and(i_fold * i_part <= cv_prob, (i_fold + 1) * i_part > cv_prob))
X_train, y_train = train_files_gray[train_cv_ind, :, :], train_labels[train_cv_ind]
X_test, y_test = train_files_gray[test_cv_ind, :, :], train_labels[test_cv_ind]
"""
Compile Model
"""
# the data, shuffled and split between train and test sets
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
np.random.seed(1007) # for reproducibility
model = Sequential()
"""
inner layers start
"""
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
"""
inner layers stop
"""
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
sgd = SGD(lr=0.03, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.reset_states()
for epoch_i in range(nb_epoch):
X_train_cp = np.array(X_train, copy=True)
print('Epoch %d' % epoch_i)
if epoch_i in lr_updates:
print('lr changed to %f' % lr_updates[epoch_i])
model.optimizer.lr.set_value(lr_updates[epoch_i])
np.random.seed(epoch_i)
rotate_angle = np.random.normal(0, 5, X_train_cp.shape[0])
rescale_fac = np.random.normal(1.05, 0.1, X_train_cp.shape[0])
right_move = np.random.normal(0, 0.1, X_train_cp.shape[0])
up_move = np.random.normal(0, 0.1, X_train_cp.shape[0])
shear = np.random.normal(0, 10, X_train_cp.shape[0])
shear = np.deg2rad(shear)
for img_i in range(X_train_cp.shape[0]):
afine_tf = tf.AffineTransform(shear=shear[img_i])
X_train_cp[img_i, 0] = tf.warp(X_train_cp[img_i, 0], afine_tf)
X_train_cp[img_i, 0] = img_rotate(X_train_cp[img_i, 0], rotate_angle[img_i], 0.03)
X_train_cp[img_i, 0] = img_rescale(X_train_cp[img_i, 0], rescale_fac[img_i], )
X_train_cp[img_i, 0] = img_leftright(X_train_cp[img_i, 0], right_move[img_i])
X_train_cp[img_i, 0] = img_updown(X_train_cp[img_i, 0], up_move[img_i])
# img_draw(X_train_cp[:, 0, :, :], label_encoder.inverse_transform(y_train), 100)
for batch_i in range(0, X_train_cp.shape[0], batch_size):
if (batch_i + batch_size) < X_train_cp.shape[0]:
model.train_on_batch(X_train_cp[batch_i: batch_i + batch_size], Y_train[batch_i: batch_i + batch_size],
accuracy=True)
else:
model.train_on_batch(X_train_cp[batch_i:], Y_train[batch_i:], accuracy=True)
score = model.evaluate(X_train, Y_train, verbose=0, show_accuracy=True)
print('Train score: %.2f, Train accuracy: %.3f' % (score[0], score[1]))
score = model.evaluate(X_test, Y_test, verbose=0, show_accuracy=True)
print('Test score: %.2f, Test accuracy: %.3f' % (score[0], score[1]))
"""
Get accuracy
"""
score = model.evaluate(X_test, Y_test, verbose=0, show_accuracy=True)
print('Test score:', score[0])
print('Test accuracy:', score[1])
acc.append(score[1])
predicted_results = model.predict_classes(X_test, batch_size=batch_size, verbose=1)
print(label_encoder.inverse_transform(predicted_results))
print(label_encoder.inverse_transform(y_test))
unsuccesful_predict = np.logical_not(predicted_results == y_test)
# img_draw_test(X_test[unsuccesful_predict, 0, :, :], label_encoder.inverse_transform(y_test[unsuccesful_predict]),
# debug_n)
if n_fold > 1:
print('The accuracy is %.3f' % np.mean(acc))
"""
Solve and submit test
"""
np.random.seed(1007) # for reproducibility
# the data, shuffled and split between train and test sets
train_files_gray = train_files_gray.reshape(train_files.shape[0], 1, img_rows, img_cols)
test_files_gray = test_files_gray.reshape(test_files.shape[0], 1, img_rows, img_cols)
# Fit the whole train data
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.reset_states()
for epoch_i in range(nb_epoch):
X_train_cp = np.array(train_files_gray, copy=True)
print('Epoch %d' % epoch_i)
if epoch_i in lr_updates:
print('lr changed to %f' % lr_updates[epoch_i])
model.optimizer.lr.set_value(lr_updates[epoch_i])
np.random.seed(epoch_i)
rotate_angle = np.random.normal(0, 5, X_train_cp.shape[0])
rescale_fac = np.random.normal(1.05, 0.1, X_train_cp.shape[0])
right_move = np.random.normal(0, 0.1, X_train_cp.shape[0])
up_move = np.random.normal(0, 0.1, X_train_cp.shape[0])
shear = np.random.normal(0, 10, X_train_cp.shape[0])
shear = np.deg2rad(shear)
for img_i in range(X_train_cp.shape[0]):
afine_tf = tf.AffineTransform(shear=shear[img_i])
X_train_cp[img_i, 0] = tf.warp(X_train_cp[img_i, 0], afine_tf)
X_train_cp[img_i, 0] = img_rotate(X_train_cp[img_i, 0], rotate_angle[img_i], 0.03)
X_train_cp[img_i, 0] = img_rescale(X_train_cp[img_i, 0], rescale_fac[img_i], )
X_train_cp[img_i, 0] = img_leftright(X_train_cp[img_i, 0], right_move[img_i])
X_train_cp[img_i, 0] = img_updown(X_train_cp[img_i, 0], up_move[img_i])
# img_draw(X_train_cp[:, 0, :, :], label_encoder.inverse_transform(y_train), 100)
for batch_i in range(0, X_train_cp.shape[0], batch_size):
if (batch_i + batch_size) < X_train_cp.shape[0]:
model.train_on_batch(X_train_cp[batch_i: batch_i + batch_size],
train_labels_dummy[batch_i: batch_i + batch_size], accuracy=True)
else:
model.train_on_batch(X_train_cp[batch_i:], train_labels_dummy[batch_i:], accuracy=True)
predicted_results = model.predict_classes(test_files_gray, batch_size=batch_size, verbose=1)
predicted_results = label_encoder.inverse_transform(predicted_results)
test_index = []
for file_name in test_names:
test_index.append(int(file_name.split('.')[0].split('/')[-1]))
sub_file = pd.DataFrame.from_csv('sampleSubmission.csv')
sub_file.Class = predicted_results
sub_file.index = test_index
sub_file.index.name = 'ID'
sub_file.to_csv(submit_name)
# each_border -> rgb2gray: 0.7018
# each_rescale_intensity -> each_border -> rgb2gray: 0.7081
# each_rescale_intensity -> each_border -> each_rescale_intensity -> rgb2gray:
# Epoch 15, val_loss: 1.2972 - val_acc: 0.7069
# each_rescale_intensity -> each_border -> rgb2gray -> rescale_intensity: Epoch 12, val_loss: 1.2152 - val_acc: 0.7037
# each_equalize_hist -> each_border -> rgb2gray -> equalize_hist: Epoch 16, val_loss: 1.2984 - val_acc: 0.6846
# each_rescale_intensity -> each_border -> rgb2gray -> rescale_intensity, sobel(10, 90):
# Epoch 18, val_loss: 1.2860 - val_acc: 0.7094
|
af0b48b2c7c685cb258091e76fbe245a3cdc275c
|
[
"Python"
] | 4 |
Python
|
yairbeer/google_julia
|
be38278505a7e73d7a5ebc8f718c1adb6fa3d3d7
|
15f1c452544c10c2c76a7039fe4f65d90609d946
|
refs/heads/master
|
<repo_name>ANamelessWolf/urabe<file_sep>/src/ConnectionError.php
<?php
/**
* Class ConnectionError
*
* @package URABE-API
* @author <NAME> <<EMAIL>>
* @version v.1.1 (01/10/2019)
* @copyright copyright (c) 2018-2020, Nameless Studios
*/
//namespace nameless\urabe;
/**
* A connection database error
* Can be caused by a bad connection or bad request
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class ConnectionError
{
const IGNORE_STMT_ORACLE = "statement";
const IGNORE_STMT_PG = "resource";
const IGNORE_CONN_PG = "connection";
/**
* @var int $code The last error code number.
*/
public $code;
/**
* @var string $message The database connection error text.
*/
public $message;
/**
* @var string $sql The SQL statement text. If there was no statement, this is an empty string.
*/
public $sql;
/**
* @var string $file The file where the error was found
*/
public $file;
/**
* @var int The line where the error was found
*/
public $line;
/**
* @var array The error context
*/
private $err_context;
/**
* Gets the error context if the Urabe settings allow
* printing the error context
*
* @return mixed The error context
*/
public function get_err_context()
{
//resource field can not be serialized, it has to be removed to avoid problems echoing the response
$ignoreParams = array(self::IGNORE_STMT_ORACLE, self::IGNORE_STMT_PG, self::IGNORE_CONN_PG);
foreach ($ignoreParams as &$key)
if (array_key_exists($key, $this->err_context)) {
unset($this->err_context[$key]);
}
return KanojoX::$settings->show_error_context ? $this->err_context : null;
}
/**
* Sets the error context
* @param mixed $value The error context
* @return void
*/
public function set_err_context($value)
{
return $this->err_context = $value;
}
/**
* Formats an exception error
*
* @return array The exception error is a mixed array
*/
public function get_exception_error()
{
if (KanojoX::$settings->show_error_details) {
$err_context = array();
if (KanojoX::$settings->show_error_context)
foreach (KanojoX::$errors as &$error) {
$context = is_null($error->sql) ? array(
NODE_MSG => $error->message,
NODE_CODE => $error->code,
NODE_FILE => $error->file,
NODE_LINE => $error->line,
NODE_ERROR_CONTEXT => $error->get_err_context()
) : array(
NODE_CODE => $error->code,
NODE_FILE => $error->file,
NODE_LINE => $error->line,
NODE_ERROR_CONTEXT => $error->get_err_context(),
NODE_QUERY => $error->sql
);
array_push($err_context, array(NODE_ERROR => $context));
}
if (isset($this->sql))
return array(NODE_QUERY => $this->sql, NODE_CODE => $this->code, NODE_FILE => $this->file, NODE_LINE => $this->line, NODE_ERROR_CONTEXT => $err_context);
else
return array(NODE_CODE => $this->code, NODE_FILE => $this->file, NODE_LINE => $this->line, NODE_ERROR_CONTEXT => $err_context);
} else
return null;
}
}
?><file_sep>/src/DBDriver.php
<?php
require_once "Enum.php";
/**
* Database connection drivers supported by the URABE API,
* defined as a collection of enum constants.
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
abstract class DBDriver extends Enum
{
/**
* @var int NS
* Not supported driver
*/
const NS = -1;
/**
* @var int ORACLE
* ORACLE driver connection with OCI Functions
*/
const ORACLE = 0;
/**
* @var int PG
* PG connection with libPQ Library
*/
const PG = 1;
/**
* @var int MYSQL
* The mysqli extension allows you to access the
* functionality provided by MySQL 4.1 and above.
*/
const MYSQL = 2;
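    // Note: other classes compare against these constants; for example,
    // BooleanFieldDefinition::format_value uses ($driver == DBDriver::PG) to choose
    // the PostgreSQL boolean literals 't'/'f' instead of 1/0.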
}
?><file_sep>/src/HasamiRESTfulService.php
<?php
include_once "Urabe.php";
/**
* Hasami Restful Service Class
* This class creates and manages a simple REST service that performs a transaction on a supported
* database or executes a defined action
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class HasamiRESTfulService
{
/**
* The web service request content
*
* @var WebServiceContent The web service content
*/
public $data;
/**
* @var Urabe The database manager
*/
public $urabe;
/**
* @var HasamiWrapper The Web service manager
*/
public $wrapper;
/**
* @var callback|string Defines the service task. When the service task is a callback, the method
* must receive a WebServiceContent object and a Urabe object and return a UrabeResponse:
* function (WebServiceContent $data, Urabe $urabe): UrabeResponse
* When the service task is given as a string, the action is called directly on the defined wrapper
*/
public $service_task;
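    /*
     * Illustrative sketch (not part of the original source); the table name and SQL are assumptions:
     * $service = new HasamiRESTfulService($data, $urabe);
     * $service->service_task = function ($data, $urabe) {
     *     return $urabe->select("SELECT * FROM users");
     * };
     * $response = $service->get_response();
     */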
/**
* __construct
*
* Initialize a new instance of the Hasami Restful service class.
*
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
*/
public function __construct($data = null, $urabe = null)
{
$this->data = $data;
$this->urabe = $urabe;
}
/**
* This function validates that the body contains all the given properties.
* The properties may refer to the column names and must match name and case
* @param array $properties The properties that must be contained in the body, as an array of strings
* @throws Exception An Exception is thrown if the body is null or the body does not contain all properties
* @return void
*/
public function validate_body(...$properties)
{
if (is_null($this->data->body))
throw new Exception(ERR_BODY_IS_NULL);
for ($i = 0; $i < count($properties); $i++) {
$property_name = $properties[$i];
if (!$this->data->in_body($property_name))
throw new Exception(sprintf(ERR_INCOMPLETE_BODY, $property_name));
}
}
/**
* This method validates the columns contained in the given node.
* It's expected that the body contains the passed node and that the node contains a columns property
* @param string $property_name The name of the property that contains the columns property in the body
* @param array $obligatory_columns An array of column names that must exist in the columns property
* @throws Exception An Exception is thrown if the body is null or the body does not contain all fields
* @return void
*/
public function validate_columns($property_name, $obligatory_columns)
{
if (is_null($this->data->body))
throw new Exception(ERR_BODY_IS_NULL);
if (!property_exists($this->data->body, $property_name))
throw new Exception(sprintf(ERR_INCOMPLETE_BODY, $property_name));
if (!property_exists($this->data->body->{$property_name}, NODE_COLS))
throw new Exception(sprintf(ERR_INCOMPLETE_DATA, $property_name, NODE_COLS));
$columns = $this->data->body->{$property_name}->{NODE_COLS};
//Columns must contain all obligatory columns
foreach ($obligatory_columns as &$column)
if (!in_array($column, $columns))
throw new Exception(sprintf(ERR_INCOMPLETE_DATA, NODE_COLS, implode(', ', $obligatory_columns)));
}
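    /*
     * Illustrative sketch (assumption: NODE_COLS resolves to "columns"). A body that
     * passes validate_columns("insert_values", array("u_name")) could look like:
     * { "insert_values": { "columns": ["u_name", "u_pass"], "values": [ ... ] } }
     */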
/**
* Gets the service response
* @throws Exception An Exception is thrown when the service task is not defined or an error occurs
* during the callback
* @return UrabeResponse The web service response
*/
public function get_response()
{
if (is_null($this->service_task))
throw new Exception(ERR_INVALID_SERVICE_TASK);
else if (is_string($this->service_task))
$result = $this->wrapper->{$this->service_task}($this->data, $this->urabe);
else if (!is_null($this->service_task))
$result = call_user_func_array($this->service_task, array($this->data, $this->urabe));
else
throw new Exception(ERR_BAD_RESPONSE);
return $result;
}
}<file_sep>/src/resources/WaraiMessages_en.php
<?php
/**
* Defines info and error messages relative to the Urabe API.
* @version 1.0.0
* @api Makoto Urabe DB Manager Oracle
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
/***************************************
**************** Info *****************
***************************************/
/**
* @var string INF_SELECT
* The message response for a successful query.
*/
const INF_SELECT = 'Selection succeed';
/***************************************
**************** Error ****************
***************************************/
/**
* @var string ERR_SAVING_JSON
* The error message sent when an error occurred saving a JSON object
*/
const ERR_SAVING_JSON = 'An error occurred saving the JSON object';
/**
* @var string ERR_NOT_IMPLEMENTED
* The error message sent when method is not implemented.
*/
const ERR_NOT_IMPLEMENTED = 'The method "%s", is not implemented in the class "%s".';
/**
* @var string ERR_BAD_RESPONSE
* The error message sent when the service returns a bad response
*/
const ERR_BAD_RESPONSE = 'The web service returns a bad response';
/**
* @var string ERR_BAD_QUERY
* The error message sent when a bad query is executed.
*/
const ERR_BAD_QUERY = 'Bad query: %s.';
/**
* @var string ERR_READING_JSON_FILE
* The error message sent when a JSON file can not be parsed.
*/
const ERR_READING_JSON_FILE = "Error reading the JSON file from '%s'.";
/**
* @var string ERR_BAD_URL
* The error message sent when the url can be parsed
*/
const ERR_BAD_URL = 'The url has an invalid format.';
/**
* @var string ERR_BODY_IS_NULL
* The error message sent when the body is null.
*/
const ERR_BODY_IS_NULL = 'An error occurred parsing the message body. The body message is null or invalid.';
/**
* @var string ERR_BAD_CONNECTION
* The error message sent when the connection is not valid.
*/
const ERR_BAD_CONNECTION = 'Invalid database connection.';
/**
* @var string ERR_NOT_CONNECTED
* The error message sent when the KanojoX connector is not connected.
*/
const ERR_NOT_CONNECTED = 'No connection to the database, did you use connect()?';
/**
* @var string ERR_INCOMPLETE_DATA
* The error message sent when the node is missing data.
*/
const ERR_INCOMPLETE_DATA = 'The %s does not contain enough data. Needed values [%s].';
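// Illustrative usage: sprintf(ERR_INCOMPLETE_DATA, 'body', 'id, u_name') yields
// "The body does not contain enough data. Needed values [id, u_name]."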
/**
* @var string ERR_INCOMPLETE_BODY
* The error message sent when the body doesn't have a property
*/
const ERR_INCOMPLETE_BODY = 'The properties [%s] were not found in the body.';
/**
* @var string ERR_MISSING_CONDITION
* The error message sent when the condition is not defined
*/
const ERR_MISSING_CONDITION = 'A condition is needed to %s';
/**
* @var string ERR_INVALID_SERVICE
* The error message sent when no service name is specified.
*/
const ERR_INVALID_SERVICE = 'No service specified.';
/**
* @var string ERR_INVALID_SERVICE_TASK
* The error message sent when trying to get a service response with no task
*/
const ERR_INVALID_SERVICE_TASK = 'No service task specified for the current service.';
/**
* @var string ERR_INVALID_ACTION
* The error message sent when trying to call a not implemented action
*/
const ERR_INVALID_ACTION = 'No action is implemented in this web service with the name %s. ';
/**
* @var string ERR_SERVICE_RESTRICTED
* The error message sent when the service is restricted for the given request method.
*/
const ERR_SERVICE_RESTRICTED = 'This service can not be accessed via the verbose %s.';
/**
* @var string ERR_VERBOSE_NOT_SUPPORTED
* The error message sent when the request method is not supported
*/
const ERR_VERBOSE_NOT_SUPPORTED = 'This service does not support the verbose %s.';
/**
* @var string ERR_SERVICE_RESPONSE
* The error message sent when an exception occurred during a web request
*/
const ERR_SERVICE_RESPONSE = 'Error executing the service.';
?><file_sep>/src/JsonPrettyPrint.php
<?php
require_once "JsonPrettyStyle.php";
/**
* Json Pretty Print Class
*
* This class creates a HTML format from a JSON object
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class JsonPrettyPrint
{
/**
* @var string PLUS_BUTTON
* HTML classes to insert a plus glyph icon
*/
const PLUS_BUTTON = 'glyphicon glyphicon-plus-sign';
/**
* @var string GLYPH_BUTTON
* glyph icon HTML snippet
*/
const GLYPH_BUTTON = '<a href="#group_%s" class="%s" data-toggle="collapse" style="padding-left:%spx;"></a>';
/**
* @var string COLLAPSE_AREA_OPEN
* Initial area group HTML snippet
*/
const COLLAPSE_AREA_OPEN = '<div id="group_%s" class="collapse in">';
/**
* @var string COLLAPSE_AREA_CLOSE
* Close area group HTML snippet
*/
const COLLAPSE_AREA_CLOSE = '</div>';
/**
* @var string HTML_FORMAT_FONT_LIGHTER
* HTML snippet for writing a lighter text
*/
const HTML_FORMAT_FONT_LIGHTER = '<span style="color:%s; padding-left:%spx; font-weight:lighter">';
/**
* @var string HTML_FORMAT_FONT_BOLD
* HTML snippet for writing a bold text
*/
const HTML_FORMAT_FONT_BOLD = '<span style="color:%s; padding-left:%spx; font-weight:bold">';
/**
* @var string HTML_FORMAT_CLOSE
* HTML snippet for closing a written text
*/
const HTML_FORMAT_CLOSE = '</span>';
/**
* @var string LEFT_PADDING_PX
* Defines the TAB padding size
*/
const LEFT_PADDING_PX = "20";
/**
* The index used to generate unique collapsible group ids
*
* @var integer
*/
private $groupIndex = 0;
/**
* Defines the given JSON Style
*
* @var JsonPrettyStyle The JSON Style
*/
public $style;
/**
* __construct
*
* Initialize a new instance of the JSON pretty print class.
* @param JsonPrettyStyle $style The JSON pretty format style
*/
public function __construct($style = null)
{
if (is_null($style))
$this->style = KanojoX::$settings->default_pp_style;
else
$this->style = $style;
}
/**
* Gets the pretty print format from a JSON object
*
* @param object $json The JSON object
* @return string The JSON formatted in the pretty print format
*/
public function get_format($json)
{
$html = $this->format_json($json, 0);
return $html;
}
/**
* Formats a JSON object at a given depth level
* @param object $json The JSON object
* @param int $level The JSON level depth
* @return string The JSON formatted in the pretty print format
*/
public function format_json($json, $level)
{
$html = "";
if (is_object($json))
$html .= $this->format_object($json, $level);
else if (is_array($json))
$html .= $this->format_array($json, $level);
else
$html .= $this->format_value($json, 0);
return $html;
}
/**
* Formats a JSON object at a given depth level and desired tab offset
* @param object $json The JSON object
* @param int $level The JSON level depth
* @param int $offset The JSON tab offset
* @return string The JSON formatted in the pretty print format
*/
public function format_object($json, $level, $offset = 0)
{
$html = "";
$html .= $this->new_line($html);
$html = $this->open_group("{", $offset);
$properties = array_keys(get_object_vars($json));
for ($i = 0; $i < count($properties); $i++) {
$html .= $this->new_line($html);
$html .= $this->print_property($properties[$i], $level + 1);
$html .= $this->print_symbol(" : ", 0);
$html .= $this->format_json($json->{$properties[$i]}, $level + 1);
$html = $this->append_comma($i, count($properties), $html);
}
$html .= $this->new_line($html);
$html .= $this->print_symbol("}", $level);
$html .= $this->close_group();
return $html;
}
/**
* Formats a JSON array at a given depth level and desired tab offset
* @param array $array The JSON array
* @param int $level The JSON level depth
* @param int $offset The JSON tab offset
* @return string The JSON formatted in the pretty print format
*/
public function format_array($array, $level, $offset = 0)
{
$html = "";
if (count($array) == 0)
$html .= $this->print_symbol(" [ ] ", 0);
else {
$keys = array_keys($array);
$is_array_of_objects = is_string($keys[0]);
$symbol = ($is_array_of_objects ? "{" : "[");
$html = $this->open_group(" $symbol", $offset);
for ($i = 0; $i < count($array); $i++) {
if (is_string($keys[$i])) {
$html .= $this->new_line($html);
$html .= $this->print_property($keys[$i], $level + 1);
$html .= $this->print_symbol(" : ", 0);
$html .= $this->format_json($array[$keys[$i]], $level + 1);
} else {
$html .= $this->new_line($html);
$html .= $this->format_value($array[$keys[$i]], $level + 1);
}
$html = $this->append_comma($i, count($keys), $html);
}
$html .= $this->new_line($html);
$symbol = ($is_array_of_objects ? "}" : "]");
$html .= $this->print_symbol("$symbol", $level);
$html .= $this->close_group();
}
return $html;
}
/**
* Formats a JSON value at a given depth level
* @param mixed $value The JSON value
* @param int $level The JSON level depth
* @return string The JSON formatted in the pretty print format
*/
public function format_value($value, $level)
{
$html = "";
if (is_string($value)) {
if (strtolower($value) == "true" || strtolower($value) == "false")
$html .= $this->print_bool_value($value == "true", $level);
else
$html .= $this->print_text_value($value, $level);
} else if (is_numeric($value))
$html .= $this->print_number_value($value, $level);
else if (is_null($value))
$html .= $this->print_null_value($level);
else
$html .= $this->format_array($value, $level, $level);
return $html;
}
/*************************************
* Values are formatted with span tag*
*************************************/
/**
* Opens a JSON group that can be collapsed by clicking a glyph icon
*
* @param string $symbol The symbol that opens the group can be a "{" or a "["
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function open_group($symbol, $level)
{
$html = "";
$html .= sprintf(self::GLYPH_BUTTON, ++$this->groupIndex, self::PLUS_BUTTON, $level * self::LEFT_PADDING_PX);
$html .= sprintf(self::HTML_FORMAT_FONT_BOLD . '%s' . self::HTML_FORMAT_CLOSE, $this->style->symbol_color, 0, " " . $symbol);
$html .= sprintf(self::COLLAPSE_AREA_OPEN, $this->groupIndex);
return $html;
}
/**
* Returns the HTML close group tag
*
* @return string The html snippet
*/
private function close_group()
{
return self::COLLAPSE_AREA_CLOSE;
}
/**
* Prints a symbol with the pretty JSON format.
*
* @param string $symbol The symbol to print
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_symbol($symbol, $level)
{
return sprintf(self::HTML_FORMAT_FONT_BOLD . '%s' . self::HTML_FORMAT_CLOSE, $this->style->symbol_color, $level * self::LEFT_PADDING_PX, $symbol);
}
/**
* Prints a property name with the pretty JSON format.
*
* @param string $property The property name
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_property($property, $level)
{
return sprintf(self::HTML_FORMAT_FONT_LIGHTER . '"%s"' . self::HTML_FORMAT_CLOSE, $this->style->property_name_color, $level * self::LEFT_PADDING_PX, $property);
}
/**
* Prints a text value with the pretty JSON format.
*
* @param string $text The text value
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_text_value($text, $level)
{
return sprintf(self::HTML_FORMAT_FONT_LIGHTER . '"%s"' . self::HTML_FORMAT_CLOSE, $this->style->text_value_color, $level * self::LEFT_PADDING_PX, $text);
}
/**
* Prints a null value with the pretty JSON format.
*
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_null_value($level)
{
return sprintf(self::HTML_FORMAT_FONT_BOLD . 'null' . self::HTML_FORMAT_CLOSE, $this->style->null_value_color, $level * self::LEFT_PADDING_PX, $level);
}
/**
* Prints a number value with the pretty JSON format.
*
* @param string $number The number value.
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_number_value($number, $level)
{
return sprintf(self::HTML_FORMAT_FONT_BOLD . '%s' . self::HTML_FORMAT_CLOSE, $this->style->number_value_color, $level * self::LEFT_PADDING_PX, $number);
}
/**
* Prints a boolean value with the pretty JSON format.
*
* @param string $bool The boolean value.
* @param int $level The JSON level depth
* @return string The html snippet
*/
private function print_bool_value($bool, $level)
{
return sprintf(self::HTML_FORMAT_FONT_BOLD . '%s' . self::HTML_FORMAT_CLOSE, $this->style->boolean_value_color, $level * self::LEFT_PADDING_PX, $bool ? "true" : "false");
}
/**
* Inserts an HTML line break unless the previous element is a collapse area tag
*
* @param string $html The html code
* @return string The html snippet
*/
private function new_line($html)
{
$closeDivSize = strlen(self::COLLAPSE_AREA_CLOSE);
$openDivSize = strlen(sprintf(self::COLLAPSE_AREA_OPEN, $this->groupIndex));
$htmlLen = strlen($html);
$last_tag_was_div_open = strlen($html) > $openDivSize && substr($html, strlen($html) - $openDivSize) == sprintf(self::COLLAPSE_AREA_OPEN, $this->groupIndex);
//No spaces after div open or close tags
$last_tag_was_div_close = strlen($html) > $closeDivSize && substr($html, strlen($html) - $closeDivSize) == self::COLLAPSE_AREA_CLOSE;
if (!$last_tag_was_div_open && !$last_tag_was_div_close)
return "<br>";
else
return "";
}
/**
* Appends a comma at the end of a given element
*
* @param int $index The element index
* @param int $elements_count The total number of elements
* @param string $html The html code
* @return string The html code with an appended comma
*/
private function append_comma($index, $elements_count, $html)
{
if ($index < ($elements_count - 1)) {
$closeDivSize = strlen(self::COLLAPSE_AREA_CLOSE);
$last_tag_was_div = strlen($html) > $closeDivSize && substr($html, strlen($html) - $closeDivSize) == self::COLLAPSE_AREA_CLOSE;
if ($last_tag_was_div) {
$html = substr($html, 0, strlen($html) - strlen(self::COLLAPSE_AREA_CLOSE));
$html .= $this->print_symbol(",", 0);
$html .= $this->close_group();
} else
$html .= $this->print_symbol(", ", 0);
}
return $html;
}
}
<file_sep>/src/IHasami.php
<?php
/**
* This interface allows managing access to a RESTful service
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
interface IHasami
{
/**
* Gets the database manager
*
* @return Urabe The database manager
*/
public function get_urabe();
/**
* Gets the web service request content
*
* @return WebServiceContent Returns the web service content
*/
public function get_request_data();
/**
* Gets the table name
*
* @return string Returns the table name
*/
public function get_table_name();
/**
* Gets the table INSERT column names
*
* @return array Returns the column names in an array of strings
*/
public function get_insert_columns();
/**
* Gets the column name used as primary key
*
* @return string Returns the column name
*/
public function get_primary_key_column_name();
/**
* Gets the selection filter, used by the GET service
* in its default mode
*
* @return string Returns the column filter
*/
public function get_selection_filter();
/**
* Sets the selection filter, used by the GET service
* in its default mode
* @param string $condition The filter condition
* @return void
*/
public function set_selection_filter($condition);
/**
* Gets the service manager by the verbose type
* @param string $verbose The service verbose type
* @return HasamiRESTfulService The service manager
*/
public function get_service($verbose);
/**
* Gets the service status assigned to the given service
* @param string $verbose The service verbose type
* @return ServiceStatus The service current status
*/
public function get_service_status($verbose);
/**
* Sets the service status to the given service name
* @param string $verbose The service verbose type
* @param ServiceStatus $status The service status
* @return void
*/
public function set_service_status($verbose, $status);
}
?><file_sep>/src/BooleanFieldDefinition.php
<?php
/**
* Class BooleanFieldDefinition | BooleanFieldDefinition.php
*
* @package URABE-API
* @author <NAME> <<EMAIL>>
* @version v.1.1 (01/10/2019)
* @copyright copyright (c) 2018-2020, Nameless Studios
*/
include_once "FieldDefinition.php";
/**
* Boolean Field Definition Class
*
* This class encapsulates a table column definition and formats its values as JSON field values.
* Each table field is associated with a column and stores its index and data type.
*
* @api Makoto Urabe DB Manager
*/
class BooleanFieldDefinition extends FieldDefinition
{
/**
* Initialize a new instance of a Field Definition class
*
* @param string $index The column index
* @param string $column The column name
* @param string $data_type The data type name
*/
public function __construct($index, $column, $data_type)
{
parent::__construct($index, $column, $data_type);
}
/**
* Gets the value from a string in the row definition data type
*
* @param string $value The selected value as string
* @return boolean The value formatted as a boolean
*/
public function get_value($value)
{
if (is_null($value))
return null;
        else if (strtolower(strval($value)) == 'true' || strtolower(strval($value)) == 'false')
return strval(strtolower($value)) == 'true';
else
return intval($value) == 1;
}
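    // Illustrative examples: get_value("true") === true, get_value("0") === false,
    // get_value(null) === null (assuming a string or numeric input as documented above).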
/**
* Formats a value to be use as a place holder parameter
*
* @param DBDriver $driver The database driver
* @param mixed $value The value to format
* @return mixed The value as the same type of the table definition.
*/
public function format_value($driver, $value)
{
if ($this->data_type == PARSE_AS_BOOLEAN)
return $driver == DBDriver::PG ? ($value == true ? "t" : "f") : ($value == true ? 1 : 0);
else
return parent::format_value($driver, $value);
}
}
?><file_sep>/src/POSTService.php
<?php
include_once "HasamiRESTfulService.php";
/**
* POST Service Class
* This class defines a restful service with a request verbose POST.
* This method is often used to update or access protected data from the database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class POSTService extends HasamiRESTfulService
{
/**
* @var string The update condition
*/
public $update_condition;
/**
* __construct
*
* Initialize a new instance of the POST Service class.
* A default service task is defined as a callback using the function POSTService::default_POST_action
*
* @param IHasami $wrapper The web service wrapper
* @param string $update_condition The update condition
*/
public function __construct($wrapper, $update_condition = null)
{
$data = $wrapper->get_request_data();
$data->extra->{TAB_NAME} = $wrapper->get_table_name();
$data->extra->{CAP_UPDATE} = is_null($update_condition) ? null : $update_condition;
$urabe = $wrapper->get_urabe();
parent::__construct($data, $urabe);
$this->wrapper = $wrapper;
$this->service_task = function ($data, $urabe) {
return $this->default_POST_action($data, $urabe);
};
}
/**
* Wraps the update function from urabe
* @param string $table_name The table name.
* @param object $values The values to update as column-value pairs.
* Column names are the keys and the update values are the associated values; placeholders can only be values, not identifiers.
* @param string $condition The condition to match
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function update($table_name, $values, $condition)
{
return $this->urabe->update($table_name, $values, $condition);
}
/**
* Wraps the update_by_field function from urabe
*
* @param string $table_name The table name.
* @param array $values The values to update as a key-value pair array.
* Column names are the keys and the update values are the associated values; placeholders can only be values, not identifiers.
* @param string $column_name The column name used in the condition.
* @param string $column_value The column value used in the condition.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function update_by_field($table_name, $values, $column_name, $column_value)
{
return $this->urabe->update_by_field($table_name, $values, $column_name, $column_value);
}
/**
* Defines the default POST action, by default updates the given values.
* A condition is needed to update values.
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @throws Exception An Exception is thrown if the response can be processed correctly
* @return UrabeResponse The server response
*/
protected function default_POST_action($data, $urabe)
{
try {
$table_name = $data->extra->{TAB_NAME};
//Validate update values
$this->validate_body(NODE_VAL);
$condition = $data->extra->{CAP_UPDATE};
$values = $this->wrapper->format_values($data->body->{NODE_VAL});
//A Condition is obligatory to update
if (is_null($condition))
throw new Exception(sprintf(ERR_MISSING_CONDITION, CAP_UPDATE));
//Get response
$column_name = array_keys($condition)[0];
$column_value = $this->wrapper->format_value($urabe->get_driver(), $column_name, $condition[$column_name]);
$response = $this->update_by_field($table_name, $values, $column_name, $column_value);
return $response;
} catch (Exception $e) {
throw new Exception("Error Processing Request, " . $e->getMessage(), $e->getCode());
}
}
}
?><file_sep>/docs/UrabeDoc.md
# Introduction
This section describes how to use the database manager to execute SQL statements. The class `Urabe` is initialized using a `KanojoX` connector. The `Urabe` database manager is oriented to web service applications and its functions return a web service response. The class allows you to select, insert, update and delete data, or to execute any other SQL statement through the `Urabe->query()` function.
To explain the use of this class, a table named _USERS_ is used as an example; it has an integer primary key `id` and two `varchar(45)` columns, `u_name` and `u_pass` (see the output of `get_table_definition` below).
## Initializing the database connector
This class is constructed using a `KanojoX` class, previously initialized as described in the `KanojoX` section. This API supports ORACLE, MYSQL and PG. Once this class is constructed, access to the database connector is kept in the property `Urabe::connector`.
### constructor
```php
public function __construct($connector);
```
| Name | Data type | Description |
| - | - | - |
| **`$connector`** | **KanojoX** | The previously initialize `KanojoX` connector. |
**Warnings:** An Exception is thrown if `KanojoX` is not connected or not initialized.
**Example:** Initializing a new instance of Urabe Class
```php
//1: Creates a Kanojo Object used to connect to ORACLE
$kanojo = new ORACLEKanojoX();
$kanojo->init($body->connection);
//2: Create an instance of Urabe connector
$urabe = new Urabe($kanojo);
```
## Executing a query
To execute any query use the function `Urabe->query`; it receives an SQL statement with or without parameters.
```php
function query($sql, $variables = null)
```
### query() function description
| Name | Data type | Description |
| - | - | - |
| **`$sql`** | **string** | The SQL statement |
| **`$variables`** | **array** | The SQL place holder values. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Execute an insert query
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$result = $urabe->query(
"INSERT INTO testing.users (u_name, u_pass) VALUES ($1, $2)"
array("Mike","<PASSWORD>"));
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": {
"sql": "INSERT INTO testing.users (u_name, u_pass) VALUES ($1, $2)",
"parameters": [
"Mike",
"<PASSWORD>"
]
}
}
```
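As a quick illustration (a sketch, not from the original docs; the message text is arbitrary), the response message can be set before encoding:
```php
//$result is the UrabeResponse returned by Urabe->query()
$result->message = "User inserted"; //illustrative message text
echo json_encode($result);
```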
## Getting a table definition
`Urabe` provides functionality to select the table definition used to create a `MysteriousParser`. The table definition is returned as an array of `FieldDefinition`. This example continues using the _USERS_ table described in the introduction.
```php
FieldDefinition[] function get_table_definition($table_name);
```
### get_table_definition() function description
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The name of the table |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Select the table definition for USERS table
```php
//For this test the connector is made for PG database
$kanojo = new PGKanojoX();
$kanojo->init($body->connection);
$urabe->connector->schema = "testing";
//Initializing Urabe
$urabe = new Urabe($kanojo);
$result = $urabe->get_table_definition("users");
echo json_encode($result);
```
The output result:
```json
[
{
"column_index": 1,
"column_name": "id",
"data_type": "integer",
"char_max_length": null,
"numeric_precision": 32,
"numeric_scale": 0
},
{
"column_index": 2,
"column_name": "u_name",
"data_type": "character varying",
"char_max_length": 45,
"numeric_precision": null,
"numeric_scale": null
},
{
"column_index": 3,
"column_name": "u_pass",
"data_type": "character varying",
"char_max_length": 45,
"numeric_precision": null,
"numeric_scale": null
}
]
```
## Selecting data from the database
This section describes the `Urabe` class functionality for selecting data. The following examples use the _USERS_ table described in the introduction.
### select()
The default selection method of the `Urabe` database manager returns the selection as a web service response. Called via `Urabe->select`, it executes an SQL selection query and parses the data with the given parser; if no parser is specified, the parser defined in `KanojoX::parser` is used. The web service response is of type `UrabeResponse`.
```php
UrabeResponse function select($sql, $variables = null, $row_parser = null);
```
#### select() function description
| Name | Data type | Description |
| - | - | - |
| **`$sql`** | **string** | The SQL statement |
| _`$variables`_ | **mixed[]** | The colon-prefixed bind variables placeholder values used in the statement in order. |
| _`$row_parser`_ | **MysteriousParser** |The way the rows are fetched via a parser callback.|
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Select all users from the table user
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$result = $urabe->select("SELECT * FROM testing.users");
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response.
```json
{
"message": "Data selected from user table",
"result": [
{
"id": "1",
"u_name": "user1",
"u_pass": "<PASSWORD>"
},
{
"id": "2",
"u_name": "user2",
"u_pass": "<PASSWORD>"
}
],
"size": 2,
"error": null,
"query": "SELECT * FROM testing.users"
}
```
### select_all()
An alias of the select function; it executes `select()` with the following parameters.
| Name | value |
| - | - |
| **`$sql`** | `sprintf('SELECT * FROM %s', $table_name)` |
| _`$variables`_ | null |
| _`$row_parser`_ | `$row_parser`|
```php
UrabeResponse function select_all($table_name, $row_parser = null);
```
#### select_all() function description
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The name of the table |
| _`$row_parser`_ | **MysteriousParser** |The way the rows are fetched via a parser callback.|
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Select all users from the table user
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$result = $urabe->select_all("testing.users");
echo json_encode($result);
```
**return:** The same result as the `select()` function.
### select_one()
As the name suggests, this returns a single value as a string, taken from the first row and first column. If no value is selected, a default value is returned.
```php
string function select_one($sql, $variables = null, $default_val = null);
```
#### select_one() function description
| Name | Data type | Description |
| - | - | - |
| **`$sql`** | **string** | The SQL statement |
| _`$variables`_ | **mixed[]** | The colon-prefixed bind variables placeholder values used in the statement in order. |
| _`$default_val`_ | **string** |The return value if nothing is selected.|
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Select the `id` for user2
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$result = $urabe->select_one("SELECT id FROM testing.users WHERE u_name = ?", array('user2'));
echo $result;
```
**return:** The selected value as _String_ value
The output result:
```bash
2
```
### select_items()
From a SQL selection query this method returns the values taken from the first selected column. The values are returned in an array with no associative keys.
```php
mixed[] function select_items($sql, $variables = null);
```
#### select_items() function description
| Name | Data type | Description |
| - | - | - |
| **`$sql`** | **string** | The SQL statement |
| _`$variables`_ | **mixed[]** | The colon-prefixed bind variables placeholder values used in the statement in order. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Select all user names (`u_name`) from the users table
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$result = $urabe->select_items("SELECT u_name FROM testing.users");
var_dump ($result);
```
**return:** The selected value as an array.
The output result:
```php
array(2) {
[0]=>
string(5) "user1"
[1]=>
string(5) "user2"
}
```
## Inserting data into the database
This section describes the `Urabe` class functionality for inserting data. The following examples use the _USERS_ table described in the introduction.
`Urabe` has two main methods to simplify data insertion: one executes a simple insert and the other inserts multiple records in one query.
### Content
- [insert](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-inserting-data#inserting-one-record)
- [insert_bulk](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-inserting-data#inserting-multiple-records)
- [schemas](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-inserting-data#schemas)
### Inserting one record
To add a single record into a given table using `Urabe`, we don't need to write the SQL statement; instead we call a function that receives the name of the table and the values defined as an object.
The values schema is described in the [schema section](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-inserting-data#schemas); for inserting a new user we need to pass the following object.
```json
{
"U_NAME": "Mike",
"U_PASS": "<PASSWORD>"
}
```
**Note:** The object is decoded using the PHP method `json_decode`.
```php
UrabeResponse public function insert($table_name, $values);
```
#### insert() function description
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$values`** | **object** | The values to insert as key value pair array. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Insert a new user to the table.
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$user = $body->insert_values;
$result = $urabe->insert("USERS",$user);
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": {
"sql": "INSERT INTO testing.users (u_name, u_pass) VALUES ($1, $2)",
"parameters": [
"Mike",
"<PASSWORD>"
]
}
}
```
### Inserting multiple records
To insert more than one record you can use the `insert_bulk` method, which receives the table name, the column names as an array of strings, and the insert values as an array of objects.
The columns
```json
[
"U_NAME",
"U_PASS"
]
```
The insert values
```json
[
{
"U_NAME": "Mike",
"U_PASS": "<PASSWORD>"
},
{
"U_NAME": "nameless",
"U_PASS": "<PASSWORD>"
}
]
```
**Note:** The object is decoded using the PHP method `json_decode`.
```php
UrabeResponse function insert_bulk($table_name, $columns, $values);
```
#### insert_bulk() function description
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$columns`** | **string[]** | The table column names as an array of strings |
| **`$values`** | **object[]** | The values to insert as key value pair array. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Insert two users into the table.
```php
//$kanojo variable has been initialized correctly
$urabe = new Urabe($kanojo);
$columns = array("U_NAME", "U_PASS");
$users = $body->insert_values;
$result = $urabe->insert_bulk("USERS", $users);
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 2,
"result": [],
"error": null,
"query": {
"sql": "INSERT INTO testing.users (u_name, u_pass) VALUES ($1, $2), ($3, $4)",
"parameters": [
"Mike",
"pass123",
"nameless",
"pass123"
]
}
}
```
## Updating the database
This section describes the `Urabe` class functionality for updating data. The following examples use the _USERS_ table described in the introduction.
`Urabe` simplifies the update process with a method that receives the update data and a condition to match.
### Updating
To update records in a given table using `Urabe`, we don't need to write the SQL statement; instead we call the function `update`, which receives the table name, the values to update and a condition.
The values to update are defined in the `Table Record Schema` schema described in the [schema section](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-updating-the-database#schemas), for example if we want to update the user name we just need to pass the value associated with a key.
```json
{
"U_NAME": "Mike-sama"
}
```
**Note:** The object is decoded using the PHP method `json_decode`.
#### update() function description
```php
UrabeResponse public function update($table_name, $values, $condition);
```
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$values`** | **object** | The data to update as column-value pairs. |
| **`$condition`** | **string** | The SQL statement update condition. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Update the username
```php
$urabe = new Urabe($kanojo);
$update_data = $body->update_values;
$result = $urabe->update("USERS", $update_data,"ID = 1");
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": {
"sql": "UPDATE testing.users SET u_name = $1 WHERE id = 1",
"parameters": [
"MikeUpdated"
]
}
}
```
#### update_by_field() function description
`Urabe` has an alias function that builds the update condition; instead of passing the condition, it receives a column name and a column value. The condition matches rows where the column is equal to the given value.
```php
UrabeResponse public function update_by_field($table_name, $values, $column_name, $column_value)
```
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$values`** | **object** | The data to update as column-value pairs. |
| **`$column_name`** | **string** | The name of a column of the table. |
| **`$column_value`** | **mixed** | The value the column must be equal to. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Update the username
```php
$urabe = new Urabe($kanojo);
$update_data = $body->update_values;
$result = $urabe->update_by_field("USERS", $update_data,"ID", 1);
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": {
"sql": "UPDATE testing.users SET u_name = $1 WHERE id = 1",
"parameters": [
"MikeUpdated"
]
}
}
```
## Delete data from the database
This section describes the `Urabe` class functionality for deleting data. The following examples use the _USERS_ table described in the introduction.
`Urabe` simplifies the delete process with a method that receives the condition to match in a DELETE SQL statement.
### Deleting
To delete records from a given table using `Urabe`, we don't need to write the SQL statement; instead we call the function `delete` and define a delete condition.
#### delete() function description
```php
UrabeResponse public function delete($table_name, $condition);
```
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$condition`** | **string** | The SQL statement update condition. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Delete an user record
```php
$urabe = new Urabe($kanojo);
$result = $urabe->delete("USERS", "ID = 1");
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": "DELETE FROM testing.users WHERE id = 1"
}
```
#### Delete by field function description
`Urabe` has an alias function that builds the delete condition; instead of passing the condition, it receives a column name and a column value. The condition matches rows where the column is equal to the given value.
```php
UrabeResponse public function delete_by_field($table_name, $column_name, $column_value)
```
| Name | Data type | Description |
| - | - | - |
| **`$table_name`** | **string** | The table name |
| **`$column_name`** | **string** | The name of a column of the table. |
| **`$column_value`** | **mixed** | The value the column must be equal to. |
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available or if the SQL statement is invalid.
**Example:** Delete an user record
```php
$urabe = new Urabe($kanojo);
$update_data = $body->update_values;
$result = $urabe->delete_by_field("USERS", "ID", 1);
echo json_encode($result);
```
**return:** The web service response as an encoded _String_ of the `UrabeResponse` object. You can override the response message via `UrabeResponse->message` before encoding.
The encoded response is:
```json
{
"succeed": true,
"affected_rows": 0,
"result": [],
"error": null,
"query": {
"sql": "DELETE FROM testing.users WHERE id = $1",
"parameters": [
1
]
}
}
```
### Schemas
The following schemas are used when inserting and updating data with the `Urabe` class.
#### Table record schema
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "#/table-record-schema.json",
"type": "object",
"title": "A table record",
"description": "The definition of a table record, pairing column name with its value",
"patternProperties": {
"^[a-zA-Z_$0-9]+": {
"type": [
"integer",
"number",
"string",
"boolean",
"null"
]
}
},
"examples": [
{
"U_NAME": "Mike",
"U_PASS": "<PASSWORD>"
},
{
"id": 2,
"user_name": "Mike",
"registration_date": null
}
]
}
```
#### Column names schema
```json
{
"$id": "#/columns-schema.json",
"type": "array",
"title": "The column names",
"description": "The name of the columns",
"items": {
"$id": "#/properties/columns/items",
"type": "string",
"title": "Column name",
"examples": [
"id",
"user_name",
"registration_date"
],
"pattern": "^[a-zA-Z_$0-9]"
},
"minItems": 1,
"uniqueItems": true
}
```
#### Table records schema
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "#/table-records-schema.json",
"type": "array",
"title": "A collection of table records",
"description": "Define more than one table record",
"items": {
"$id": "#/properties/items/table-record",
"type": "object",
"title": "A table record",
"description": "The definition of a table record, pairing column name with its value",
"$ref": "#/table-record-schema.json"
},
"minItems": 1
}
```<file_sep>/testing/utils/UrabeTestUtils.php
<?php
include_once "TestUtils.php";
include_once "../src/Urabe.php";
/**
* This file defines the tests available for testing the
* Urabe class
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
/**
* This function is an example for testing a SQL selection query and
* fetching the result via a defined parser
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_select($urabe, $body)
{
$sql = $body->sql_select;
$result = $urabe->select($sql);
$result->message = "Urabe test selection query with default parser";
return $result;
}
/**
* This function is an example for testing the table definition selection
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_get_table_definition($urabe, $body)
{
$result = $urabe->get_table_definition($body->table_name);
$result->message = "Urabe test get table definition";
return $result;
}
/**
* This function is an example for testing a SQL selection query that returns one
* value. The first row and first column
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return string Returns the selected value
*/
function test_select_one($urabe, $body)
{
$sql = $body->sql_simple;
$result = $urabe->select_one($sql);
return $result;
}
/**
* This function is an example for testing a SQL selection query that returns the values from
* the first selected column.
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return mixed[] Returns the selected values
*/
function test_select_items($urabe, $body)
{
$sql = $body->sql_simple;
$result = $urabe->select_items($sql);
return $result;
}
/**
* This function is an example for testing an execute SQL statement. The
* test returns a flag indicating if the result succeeded and the number of affected rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_query($urabe, $body)
{
$sql = $body->update_sql;
return $urabe->query($sql);
}
/**
* This function is an example for testing an insert SQL statement. The
* test returns a flag indicating if the result succeeded and the number of inserted rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_insert($urabe, $body)
{
$insert_params = $body->insert_params;
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
return $urabe->insert($table_name, $insert_params);
}
/**
 * This function is an example for testing an insert bulk SQL statement. The
 * test returns a flag indicating whether the execution succeeded and the number of inserted rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_insert_bulk($urabe, $body)
{
$bulk = $body->insert_bulk;
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
return $urabe->insert_bulk($table_name, $bulk->columns, $bulk->values);
}
/**
 * This function is an example for testing an update SQL statement. The
 * test returns a flag indicating whether the execution succeeded and the number of affected rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_update($urabe, $body)
{
$values = $body->update_params;
$column_name = $body->column_name;
$column_value = $body->column_value;
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
return $urabe->update($table_name, $values, "$column_name = $column_value");
}
/**
 * This function is an example for testing an update-by-field SQL statement. The
 * test returns a flag indicating whether the execution succeeded and the number of affected rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_update_by_field($urabe, $body)
{
$values = $body->update_params;
$column_name = $body->column_name;
$column_value = $body->column_value;
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
return $urabe->update_by_field($table_name, $values, $column_name, $column_value);
}
/**
 * This function is an example for testing a delete SQL statement. The
 * test returns a flag indicating whether the execution succeeded and the number of affected rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_delete($urabe, $body)
{
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
$column_name = $body->column_name;
$column_value = $body->column_value;
return $urabe->delete($table_name, "$column_name = $column_value");
}
/**
 * This function is an example for testing a delete-by-field SQL statement. The
 * test returns a flag indicating whether the execution succeeded and the number of affected rows
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_delete_by_field($urabe, $body)
{
if ($body->driver == "PG")
$table_name = $body->schema . "." . $body->table_name;
else
$table_name = $body->table_name;
$column_name = $body->column_name;
$column_value = $body->column_value;
return $urabe->delete_by_field($table_name, $column_name, $column_value);
}
/**
 * This function is an example for testing the SQL place holders formatter
*
* @param Urabe $urabe The database data manager
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The execute result as a web service response
*/
function test_format_sql_place_holders($urabe, $body)
{
$sql = $body->sql_common;
return $urabe->format_sql_place_holders($sql);
}
/**
 * This function lists all available tests
 *
 * @return array The available test names as a response array
*/
function test_get_available_tests()
{
$functions = get_defined_functions();
$functions = $functions["user"];
$test_func = array();
for ($i = 0; $i < sizeof($functions); $i++) {
if (substr($functions[$i], 0, 5) == TEST_VAR_NAME . "_")
array_push($test_func, str_replace(array("test_"), array(), $functions[$i]));
}
$response = array("msg" => "Available functions", "tests" => $test_func, "size" => sizeof($test_func));
return $response;
}
?><file_sep>/src/PGSQL_Result.php
<?php
require_once "Enum.php";
/**
 * PostgreSQL result status codes used by the URABE API
 * @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
abstract class PGSQL_Result extends Enum
{
/**
* @var string PGSQL_EMPTY_QUERY
* Result code for empty query
*/
const PGSQL_EMPTY_QUERY = 0;
/**
* @var string PGSQL_COMMAND_OK
* Result code for command OK
*/
const PGSQL_COMMAND_OK = 1;
/**
* @var string PGSQL_TUPLES_OK
* Result code for tuples
*/
const PGSQL_TUPLES_OK = 2;
/**
* @var string PGSQL_COPY_TO
* Result code for copy to
*/
const PGSQL_COPY_TO = 3;
/**
* @var string PGSQL_COPY_FROM
* Result code for copy from
*/
const PGSQL_COPY_FROM = 4;
/**
* @var string PGSQL_BAD_RESPONSE
* Result code for bad response
*/
const PGSQL_BAD_RESPONSE = 5;
/**
* @var string PGSQL_NONFATAL_ERROR
* Result code for non fatal error
*/
const PGSQL_NONFATAL_ERROR = 7;
/**
* @var string PGSQL_FATAL_ERROR
* Result code for fatal error
*/
const PGSQL_FATAL_ERROR = 8;
}
?><file_sep>/src/PGKanojoX.php
<?php
include_once "KanojoX.php";
include_once "PGSQL_Result.php";
/**
* A PostgreSQL Connection object
*
 * Kanojo means girlfriend in Japanese and this class saves the connection data structure used to connect to
 * a PostgreSQL database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class PGKanojoX extends KanojoX
{
const DEFT_STMT_NAME = "";
/**
* @var string $schema The database schema used to filter the table definition
*/
public $schema;
/**
* Initialize a new instance of the connection object
*/
public function __construct()
{
parent::__construct();
$this->db_driver = DBDriver::PG;
}
/**
* Open a PostgreSQL Database connection
*
* @return resource The database connection object
*/
public function connect()
{
try {
$host = $this->host;
$port = $this->port;
$dbname = $this->db_name;
$username = $this->user_name;
            $passwd = $this->password;
            if (!isset($this->host) || strlen($host) == 0)
                $host = "127.0.0.1";
            $connString = "host='$host' port='$port' dbname='$dbname' user='$username' ";
            if (isset($passwd) && strlen($passwd) > 0)
                $connString .= "password='$passwd'";
$this->connection = pg_connect($connString);
return $this->connection;
} catch (Exception $e) {
return $this->error(sprintf(ERR_BAD_CONNECTION, $e->getMessage()));
}
}
/**
* Closes a PostgreSQL database connection resource.
* The connection is the last connection made by pg_connect().
*
* @return bool Returns TRUE on success or FALSE on failure.
*/
public function close()
{
$this->free_result();
if (!isset($this->connection))
throw new Exception(ERR_NOT_CONNECTED);
return pg_close($this->connection);
}
/**
* Frees the memory associated with a result
*
* @return void
*/
public function free_result()
{
foreach ($this->statementsIds as &$statementId)
pg_free_result($statementId);
}
/**
* Gets the placeholders format for the original prepared query string.
* The number of elements in the array must match the number of placeholders.
*
* @param int $index The place holder index if needed
* @return string The place holder at the given position
*/
public function get_param_place_holder($index = null)
{
return '$' . $index;
}
/**
* Get the last error message string of a connection
*
* @param string|null $sql The last executed statement. Can be null
* @param ConnectionError $error If the error exists pass the error
* @return ConnectionError The connection error
*/
public function error($sql, $error = null)
{
if (is_null($error)) {
$this->error = new ConnectionError();
$this->error->message = pg_last_error($this->connection);
$this->error->code = pg_result_status($this->connection);
$this->error->sql = $sql;
} else
$this->error = $error;
return $this->error;
}
/**
* Sends a request to execute a prepared statement with given parameters,
* and waits for the result
*
* @param string $sql The SQL Statement
* @param array|null $variables The colon-prefixed bind variables placeholder used in the statement, can be null.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function execute($sql, $variables = null)
{
if (isset($variables) && is_array($variables)) {
$result = pg_prepare($this->connection, self::DEFT_STMT_NAME, $sql);
$sql = (object)(array(NODE_SQL => $sql, NODE_PARAMS => $variables));
if ($result) {
$vars = array();
$statement = pg_execute($this->connection, self::DEFT_STMT_NAME, $variables);
} else {
$err = $this->error($sql, $this->get_error($result == false ? null : $result, $sql));
throw new UrabeSQLException($err);
}
} else {
$result = pg_send_query($this->connection, $sql);
$statement = pg_get_result($this->connection);
}
if (!$statement || pg_result_status($statement) != PGSQL_Result::PGSQL_COMMAND_OK) {
$err = $this->error($sql, $this->get_error($statement == false ? null : $statement, $sql));
throw new UrabeSQLException($err);
} else {
array_push($this->statementsIds, $statement);
return (new UrabeResponse())->get_execute_response(true, pg_affected_rows($statement), $sql);
}
}
/**
* Returns an associative array containing the next result-set row of a
* query. Each array entry corresponds to a column of the row.
*
* @param string $sql The SQL Statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @throws Exception An Exception is thrown parsing the SQL statement or by connection error
* @return array Returns an associative array.
* */
public function fetch_assoc($sql, $variables = null)
{
$rows = array();
if (!(pg_connection_status($this->connection) === PGSQL_CONNECTION_OK))
throw new Exception(ERR_NOT_CONNECTED);
if (isset($variables) && is_array($variables)) {
$result = pg_prepare($this->connection, self::DEFT_STMT_NAME, $sql);
$sql = (object)(array(NODE_SQL => $sql, NODE_PARAMS => $variables));
if ($result) {
$vars = array();
foreach ($variables as &$value)
array_push($vars, $value);
$ok = pg_execute($this->connection, self::DEFT_STMT_NAME, $vars);
} else {
$err = $this->error($sql, $this->get_error($result == false ? null : $result, $sql));
throw new UrabeSQLException($err);
}
} else {
$ok = pg_query($this->connection, $sql);
}
//fetch result
if ($ok) {
while ($row = pg_fetch_assoc($ok))
$this->parser->parse($rows, $row);
} else {
            $err = $this->error($sql, $this->get_error($ok == false ? null : $ok, $sql));
throw new UrabeSQLException($err);
}
return $rows;
}
/**
* Gets the query for selecting the table definition
*
* @param string $table_name The table name
* @return string The table definition selection query
*/
public function get_table_definition_query($table_name)
{
$fields = PG_FIELD_COL_ORDER . ", " . PG_FIELD_COL_NAME . ", " . PG_FIELD_DATA_TP . ", " .
PG_FIELD_CHAR_LENGTH . ", " . PG_FIELD_NUM_PRECISION . ", " . PG_FIELD_NUM_SCALE;
if (isset($this->schema)) {
$schema = $this->schema;
$sql = "SELECT $fields FROM information_schema.columns WHERE table_name = '$table_name' AND table_schema = '$schema'";
} else
$sql = "SELECT $fields FROM information_schema.columns WHERE table_name = '$table_name'";
return $sql;
}
/**
* Gets the table definition parser for the PG connector
*
* @return array The table definition fields as an array of FieldDefinition
*/
public function get_table_definition_parser()
{
$fields = array(
PG_FIELD_COL_ORDER => new FieldDefinition(0, PG_FIELD_COL_ORDER, PARSE_AS_INT),
PG_FIELD_COL_NAME => new FieldDefinition(1, PG_FIELD_COL_NAME, PARSE_AS_STRING),
PG_FIELD_DATA_TP => new FieldDefinition(2, PG_FIELD_DATA_TP, PARSE_AS_STRING),
PG_FIELD_CHAR_LENGTH => new FieldDefinition(3, PG_FIELD_CHAR_LENGTH, PARSE_AS_INT),
PG_FIELD_NUM_PRECISION => new FieldDefinition(4, PG_FIELD_NUM_PRECISION, PARSE_AS_INT),
PG_FIELD_NUM_SCALE => new FieldDefinition(5, PG_FIELD_NUM_SCALE, PARSE_AS_INT)
);
return $fields;
}
/**
* Gets the table definition mapper for the PG connector
*
* @return array The table mapper as KeyValued<String,String> array
*/
public function get_table_definition_mapper()
{
$map = array(
PG_FIELD_COL_ORDER => TAB_DEF_INDEX,
PG_FIELD_COL_NAME => TAB_DEF_NAME,
PG_FIELD_DATA_TP => TAB_DEF_TYPE,
PG_FIELD_CHAR_LENGTH => TAB_DEF_CHAR_LENGTH,
PG_FIELD_NUM_PRECISION => TAB_DEF_NUM_PRECISION,
PG_FIELD_NUM_SCALE => TAB_DEF_NUM_SCALE
);
return $map;
}
/**
     * Gets the error found in a PostgreSQL resource object; it could be a
     * SQL statement error or a connection error.
     *
     * @param resource $resource The SQL statement result resource or null
     * @param string $sql The SQL statement
     * @return ConnectionError The connection or transaction error
*/
private function get_error($resource, $sql)
{
$err_msg = pg_last_error($this->connection);
$this->error = new ConnectionError();
$this->error->code = is_null($resource) ? PGSQL_Result::PGSQL_BAD_RESPONSE : pg_result_status($resource);
$this->error->message = $err_msg ? $err_msg : "";
$this->error->sql = $sql;
return $this->error;
}
}
?><file_sep>/src/GETService.php
<?php
include_once "HasamiRESTfulService.php";
/**
* GET Service Class
 * This class defines a RESTful service for the request verb GET.
 * This method is often used to select unprotected data from the database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class GETService extends HasamiRESTfulService
{
/**
* __construct
*
* Initialize a new instance of the GET Service class.
* A default service task is defined as a callback using the function GETService::default_GET_action
*
* @param IHasami $wrapper The web service wrapper
*/
public function __construct($wrapper)
{
$data = $wrapper->get_request_data();
$data->extra->{TAB_NAME} = $wrapper->get_table_name();
$data->extra->{TAB_COL_FILTER} = $wrapper->get_selection_filter();
$urabe = $wrapper->get_urabe();
parent::__construct($data, $urabe);
$this->wrapper = $wrapper;
$this->service_task = function ($data, $urabe) {
return $this->default_GET_action($data, $urabe);
};
}
/**
     * Wraps the select function from Urabe; place holders are passed as @index.
     * Once the SQL selection statement is executed the data is parsed as defined in the given parser.
     * If the parser is null the parser defined in the connector object KanojoX::parser is used
*
* @param string $sql The SQL statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement, @1..@n
* @param MysteriousParser $row_parser The row parser.
* @throws Exception An Exception is thrown if not connected to the database or if the SQL is not valid
* @return UrabeResponse The SQL selection result
*/
public function select($sql, $variables = null, $row_parser = null)
{
return $this->urabe->select($sql, $variables, $row_parser);
}
/**
* Defines the default GET action, by default selects all data from the wrapper table name that match the
* column filter. If the column filter name is not given in the GET variables this function selects
* all data from the table
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
     * @throws Exception An Exception is thrown if the response cannot be processed correctly
* @return UrabeResponse The server response
*/
protected function default_GET_action($data, $urabe)
{
try {
$table_name = $data->extra->{TAB_NAME};
$filter = $data->extra->{TAB_COL_FILTER};
if (!is_null($filter)) {
$sql = $urabe->format_sql_place_holders("SELECT * FROM $table_name WHERE $filter");
return $urabe->select($sql);
} else
return $urabe->select_all($table_name);
} catch (Exception $e) {
throw new Exception("Error Processing Request, " . $e->getMessage(), $e->getCode());
}
}
}
?><file_sep>/src/ServiceStatus.php
<?php
require_once "Enum.php";
/**
* Defines an availability status when executing a service
 * @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
abstract class ServiceStatus extends Enum
{
/**
* @var string AVAILABLE
* The service can be accessed without restrictions
*/
const AVAILABLE = 0;
/**
* @var string BLOCKED
     * The service cannot be accessed
*/
const BLOCKED = 1;
/**
* @var string LOGGED
* The service can be accessed only for logged users
*/
const LOGGED = 2;
}
?><file_sep>/src/FieldDefinition.php
<?php
/**
* Field Definition Class
*
 * This class encapsulates a table column definition and formats its values to JSON field values
* Each table field is associated to a column and stores its index and data type.
*
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class FieldDefinition
{
/**
* @var int The column index
*/
public $column_index;
/**
* @var string The column name
*/
public $column_name;
/**
* @var string The column parsing type
*/
public $data_type;
/**
* @var string The column db_type
*/
public $db_type;
/**
* Initialize a new instance of a Field Definition class
*
* @param string $index The column index
* @param string $column The column name
* @param string $data_type The column parsing type
*/
public function __construct($index, $column, $data_type)
{
$this->column_index = $index;
$this->column_name = $column;
$this->data_type = $data_type;
}
/**
* Gets the value from a string in the row definition data type
*
* @param string $value The selected value as string
* @return mixed The value as the same type of the table definition.
*/
public function get_value($value)
{
if (is_null($value))
return null;
else if ($this->data_type == PARSE_AS_STRING)
return strval($value);
else if ($this->data_type == PARSE_AS_INT || $this->data_type == PARSE_AS_LONG)
return intval($value);
else if ($this->data_type == PARSE_AS_NUMBER)
return doubleval($value);
else if ($this->data_type == PARSE_AS_DATE)
return $value;
else if ($this->data_type == PARSE_AS_BOOLEAN)
return boolval($value);
else
return $value;
}
/**
* Formats a value to be use as a place holder parameter
*
* @param DBDriver $driver The selected value as string
* @param mixed $value The selected value as string
* @return mixed The value as the same type of the table definition.
*/
public function format_value($driver, $value)
{
if (is_null($value))
return null;
else if (in_array($this->data_type, array(PARSE_AS_STRING, PARSE_AS_INT, PARSE_AS_LONG, PARSE_AS_NUMBER)))
return $value;
else
return strval($value);
}
/**
* Creates a Field Definition object from a data type
*
* @param string $data The data type
* @return FieldDefinition The field definition object
*/
public static function create($data)
{
$tp = $data->data_type;
if ($tp == PARSE_AS_STRING)
$field_definition = new StringFieldDefinition($data->column_index, $data->column_name, PARSE_AS_STRING, $data->char_max_length);
else if ($tp == PARSE_AS_INT)
$field_definition = new NumericFieldDefinition($data->column_index, $data->column_name, PARSE_AS_INT, $data->numeric_precision, $data->numeric_scale);
else if ($tp == PARSE_AS_NUMBER)
$field_definition = new NumericFieldDefinition($data->column_index, $data->column_name, PARSE_AS_NUMBER, $data->numeric_precision, $data->numeric_scale);
else if ($tp == PARSE_AS_DATE)
$field_definition = new DateFieldDefinition($data->column_index, $data->column_name, PARSE_AS_DATE, $data->date_format);
else if ($tp == PARSE_AS_LONG)
$field_definition = new NumericFieldDefinition($data->column_index, $data->column_name, PARSE_AS_LONG, $data->numeric_precision, $data->numeric_scale);
else if ($tp == PARSE_AS_BOOLEAN)
$field_definition = new BooleanFieldDefinition($data->column_index, $data->column_name, PARSE_AS_BOOLEAN);
else
$field_definition = new FieldDefinition($data->column_index, $data->column_name, $data->db_type);
$field_definition->db_type = $data->db_type;
return $field_definition;
}
}
?><file_sep>/src/UrabeSQLException.php
<?php
/**
* This class represents a SQL exception
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class UrabeSQLException extends Exception
{
/**
* @var string $sql The SQL statement text. If there was no statement, this is an empty string.
*/
public $sql;
/**
* Initialize a new instance of an Urabe SQL Exception
*
* @param ConnectionError $error The connection error
*/
public function __construct($error)
{
$search = array("\t", "\n", "\r", " ", " ", "\\n");
$replace = array("", " ", " ", "", "", " ");
$msg = str_replace($search, $replace, $error->message);
$msg = sprintf(ERR_BAD_QUERY, $msg);
parent::__construct($msg, $error->code);
$this->sql = $error->sql;
}
}
?><file_sep>/testing/utils/HasamiUtilsTestUtils.php
<?php
include_once "../src/ORACLEKanojoX.php";
include_once "../src/PGKanojoX.php";
include_once "../src/MYSQLKanojoX.php";
include_once "TestUtils.php";
/**
 * Writes a connection file in the tmp folder as conn_file.json
* @param object $body The request body
* @return object The response message
*/
function test_write_connection_file($body)
{
$kanojo = pick_connector($body->driver, $body);
$kanojo->init($body->connection);
save_connection("../tmp/conn_file.json", $kanojo);
$response = new UrabeResponse();
return $response->get_response("JSON file created", array());
}
/**
 * Reads a connection file and returns the database connector
*
* @param object $body The request body
* @return KanojoX The database connector
*/
function test_read_connection_file($body)
{
return get_KanojoX_from_file("../tmp/conn_file.json");
}
?><file_sep>/testing/ClassValidatorTester.php
<?php
ini_set('display_errors', 1);
ini_set('display_startup_errors', 1);
error_reporting(E_ALL);
//If an unknown error occurs in a class without reporting anything, update this resource to include the suspect class
include_once "../src/KanojoX.php";
?><file_sep>/testing/ConnectionTester.php
<?php
/**
* This file test the connection to a given database, specifying the data connection and
* Kanojo driver.
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
include_once "../src/KanojoX.php";
include_once "../src/ORACLEKanojoX.php";
include_once "../src/PGKanojoX.php";
include_once "../src/MYSQLKanojoX.php";
//Test Response
$response = (object)array(
"msg" => "",
"status" => true,
"error" => ""
);
//0: Reads the body
$body = get_body_as_json();
//1: Selects the driver connector
if ($body->driver == "ORACLE")
$kanojo = new ORACLEKanojoX();
else if ($body->driver == "PG")
$kanojo = new PGKanojoX();
else if ($body->driver == "MYSQL")
$kanojo = new MYSQLKanojoX();
else {
$response->msg = "Driver " + (isset($body->driver) ? $body->driver . "not supported." : " not valid.");
$response->status = false;
}
if (isset($kanojo)) {
//2: Initialize the connection data
$kanojo->init($body);
//3: Connect to the Database
$conn = $kanojo->connect();
if ($conn)
$response->msg = "Connected to " . $body->driver;
else {
http_response_code(403);
$response->msg = "Error connecting to " . $body->driver . ". See error for more details.";
$response->error = $kanojo->get_last_error();//KanojoX::$errors;
$response->status = false;
}
$response->{"settings"} = KanojoX::$settings;
$kanojo->close();
}
echo json_encode($response);
?><file_sep>/testing/utils/TestUtils.php
<?php
/**
* This file contains functions that help to test Urabe project
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
/**
* @var string TEST_VAR_NAME
* The name of the variable in GET Vars that saves the test to run
*/
const TEST_VAR_NAME = "test";
/**
* Picks a Kanojo database connecter depending on the given driver
*
* @param string $driver The driver name; ORACLE|PG|MYSQL
* @param mixed $body The request body
* @throws Exception An exception is thrown if the driver is not supported
* @return KanojoX The database connector
*/
function pick_connector($driver, $body)
{
if ($driver == "ORACLE") {
$kanojo = new ORACLEKanojoX();
$kanojo->owner = $body->owner;
} else if ($driver == "PG") {
$kanojo = new PGKanojoX();
$kanojo->schema = $body->schema;
} else if ($driver == "MYSQL")
$kanojo = new MYSQLKanojoX();
else
throw new Exception("Driver " + (isset($driver) ? $driver . "not supported." : " not valid."));
return $kanojo;
}
?><file_sep>/src/ORACLEKanojoX.php
<?php
include_once "KanojoX.php";
/**
* An ORACLE Connection object
*
 * Kanojo means girlfriend in Japanese and this class saves the connection data structure used to connect to
* an Oracle database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class ORACLEKanojoX extends KanojoX
{
/**
* @var string DEFAULT_CHAR_SET
* The default char set, is UTF8
*/
const DEFAULT_CHAR_SET = 'AL32UTF8';
/**
* @var string ERR_CODE
* The OCI Error field for error code
*/
const ERR_CODE = 'code';
/**
* @var string ERR_MSG
* The OCI Error field for error message
*/
const ERR_MSG = 'message';
/**
* @var string ERR_SQL
* The OCI Error field for error SQL
*/
const ERR_SQL = 'sqltext';
/**
* @var string $owner The table owner used to filter the table definition
*/
public $owner;
/**
* Initialize a new instance of the connection object
*/
public function __construct()
{
parent::__construct();
$this->db_driver = DBDriver::ORACLE;
}
/**
* Open an ORACLE Database connection
*
* @return resource The database connection object
*/
public function connect()
{
$host = $this->host;
$port = $this->port;
$dbname = $this->db_name;
$username = $this->user_name;
$passwd = $this->password;
if (!isset($host) || strlen($host) == 0)
$host = "127.0.0.1";
$connString = $this->buildConnectionString($host, $dbname, $port);
$this->connection = oci_connect($username, $passwd, $connString, self::DEFAULT_CHAR_SET);
if ($this->connection)
return $this->connection;
else
throw new Exception(ERR_BAD_CONNECTION);
}
/**
* This function builds a connection string to connect to ORACLE
* by default is connected via SID
*
* @return string The connection string
*/
public function buildConnectionString($host, $dbname, $port)
{
return create_SID_connection($host, $dbname, $port);
}
/**
* Closes a connection
*
* @return bool Returns TRUE on success or FALSE on failure.
*/
public function close()
{
$this->free_result();
if (!$this->connection)
throw new Exception(ERR_NOT_CONNECTED);
return oci_close($this->connection);
}
/**
* Frees the memory associated with a result
*
* @return void
*/
public function free_result()
{
foreach ($this->statementsIds as &$statementId)
oci_free_statement($statementId);
}
/**
* Get the last error message string of a connection
*
* @param string|null $sql The last executed statement. Can be null
* @param ConnectionError $error If the error exists pass the error
* @return ConnectionError The connection error
*/
public function error($sql, $error = null)
{
if (is_null($error))
$this->error = $this->get_error($this->connection);
else
$this->error = $error;
//If SQL error exist
$this->error->sql = isset($sql) ? $sql : $error[self::ERR_SQL];
return $this->error;
}
/**
* Gets the error found in a ORACLE resource object could be a
* SQL statement error or a connection error.
*
* @param resource $resource The SQL statement or SQL connection
* @return ConnectionError The connection or transaction error
*/
private function get_error($resource)
{
$e = oci_error($resource);
$this->error = new ConnectionError();
$this->error->code = $e[self::ERR_CODE];
$this->error->message = $e[self::ERR_MSG];
return $this->error;
}
/**
* Sends a request to execute a prepared statement with given parameters,
* and waits for the result
*
* @param string $sql The SQL Statement
* @param array|null $variables The colon-prefixed bind variables placeholder used in the statement, can be null.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function execute($sql, $variables = null)
{
if (!isset($this->connection))
throw new Exception(ERR_NOT_CONNECTED);
$statement = $this->parse($this->connection, $sql);
if (isset($variables) && is_array($variables))
$this->bind($statement, $variables);
$ok = oci_execute($statement);
if ($ok) {
array_push($this->statementsIds, $statement);
return (new UrabeResponse())->get_execute_response(true, oci_num_rows($statement), $sql);
} else {
$err = $this->error($sql, $this->get_error($statement));
throw new UrabeSQLException($err);
}
}
/**
* Returns an associative array containing the next result-set row of a
* query. Each array entry corresponds to a column of the row.
*
* @param string $sql The SQL Statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @throws Exception An Exception is thrown parsing the SQL statement or by connection error
* @return array Returns an associative array.
* */
public function fetch_assoc($sql, $variables = null)
{
$rows = array();
if (!$this->connection)
throw new Exception(ERR_NOT_CONNECTED);
$statement = $this->parse($this->connection, $sql, $variables);
$class = get_resource_type($statement);
if ($class == CLASS_ERR)
throw (!is_null($statement->sql) ? new UrabeSQLException($statement) : new Exception($statement->message, $statement->code));
else {
array_push($this->statementsIds, $statement);
$ok = oci_execute($statement);
if ($ok) {
while ($row = oci_fetch_assoc($statement))
$this->parser->parse($rows, $row);
} else {
$err = $this->error($sql, $this->get_error($statement));
throw new UrabeSQLException($err);
}
}
return $rows;
}
/**
* Gets the query for selecting the table definition
*
* @param string $table_name The table name
* @return string The table definition selection query
*/
public function get_table_definition_query($table_name)
{
$fields = ORACLE_FIELD_COL_ORDER . ", " . ORACLE_FIELD_COL_NAME . ", " . ORACLE_FIELD_DATA_TP . ", " .
ORACLE_FIELD_CHAR_LENGTH . ", " . ORACLE_FIELD_NUM_PRECISION . ", " . ORACLE_FIELD_NUM_SCALE;
if (isset($this->owner)) {
$owner = $this->owner;
$sql = "SELECT $fields FROM ALL_TAB_COLS WHERE TABLE_NAME = '$table_name' AND OWNER = '$owner'";
} else
$sql = "SELECT $fields FROM ALL_TAB_COLS WHERE TABLE_NAME = '$table_name'";
return $sql;
}
/**
* Gets the table definition parser for the ORACLE connector
*
* @return array The table definition fields as an array of FieldDefinition
*/
public function get_table_definition_parser()
{
$fields = array(
ORACLE_FIELD_COL_ORDER => new FieldDefinition(0, ORACLE_FIELD_COL_ORDER, PARSE_AS_INT),
ORACLE_FIELD_COL_NAME => new FieldDefinition(1, ORACLE_FIELD_COL_NAME, PARSE_AS_STRING),
ORACLE_FIELD_DATA_TP => new FieldDefinition(2, ORACLE_FIELD_DATA_TP, PARSE_AS_STRING),
ORACLE_FIELD_CHAR_LENGTH => new FieldDefinition(3, ORACLE_FIELD_CHAR_LENGTH, PARSE_AS_INT),
ORACLE_FIELD_NUM_PRECISION => new FieldDefinition(4, ORACLE_FIELD_NUM_PRECISION, PARSE_AS_INT),
ORACLE_FIELD_NUM_SCALE => new FieldDefinition(5, ORACLE_FIELD_NUM_SCALE, PARSE_AS_INT)
);
return $fields;
}
/**
* Gets the table definition mapper for the database connector
*
* @return array The table mapper as KeyValued<String,String> array
*/
public function get_table_definition_mapper()
{
$map = array(
ORACLE_FIELD_COL_ORDER => TAB_DEF_INDEX,
ORACLE_FIELD_COL_NAME => TAB_DEF_NAME,
ORACLE_FIELD_DATA_TP => TAB_DEF_TYPE,
ORACLE_FIELD_CHAR_LENGTH => TAB_DEF_CHAR_LENGTH,
ORACLE_FIELD_NUM_PRECISION => TAB_DEF_NUM_PRECISION,
ORACLE_FIELD_NUM_SCALE => TAB_DEF_NUM_SCALE
);
return $map;
}
/**
* Prepares sql_text using connection and returns the statement identifier,
* which can be used with oci_execute().
*
* @param resource $connection ORACLE active connection
* @param string $sql The SQL text statement
* @return resource Returns a statement handle on success,
* or a connection Error if fails
*/
private function parse($connection, $sql, $variables = null)
{
if (!$connection)
throw new Exception(ERR_NOT_CONNECTED);
$statement = oci_parse($connection, $sql);
if ($statement && isset($variables) && is_array($variables))
$this->bind($statement, $variables);
return $statement ? $statement : $this->error($sql);
}
/**
* Binds a PHP variable to an Oracle placeholder
*
* @param resource $statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @return void
*/
private function bind($statement, $variables)
{
foreach ($variables as &$value)
oci_bind_by_name($statement, ":" . $value->bv_name, $value->variable);
}
}
/**
* Creates a SID connection to an ORACLE database
* @param string $host The connection host address
* @param string $SID The database name or Service ID
* @param string $port Oracle connection port
* @return string The oracle connection string
*/
function create_SID_connection($host, $SID, $port)
{
$strConn = "(DESCRIPTION=(ADDRESS_LIST = (ADDRESS = (PROTOCOL = TCP)(HOST = $host)(PORT = $port)))(CONNECT_DATA=(SID=$SID)))";
return $strConn;
}
?><file_sep>/testing/HasamiUtilsTester.php
<?php
/**
* This file test the functionality for the Hasami Utils
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
include_once "utils/HasamiUtilsTestUtils.php";
//Test Response
$response = (object)array(
"msg" => "",
"status" => true,
"error" => ""
);
//0: Reads the body
$body = get_body_as_json();
//1: Pick a test
$test = TEST_VAR_NAME . "_" . $_GET[TEST_VAR_NAME];
//2: Test
$result = (object)array();
$result->{$_GET[TEST_VAR_NAME]} = $test($body);
//Connection is closed automatically calling the kanojo destructor
echo json_encode($result, JSON_PRETTY_PRINT);
?><file_sep>/src/WebServiceContent.php
<?php
/**
* This class obtains the web service content from GET variables, URL parameters, POST body
* and the request method
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class WebServiceContent
{
/**
* @var array The web service get variables
*/
public $get_variables;
/**
* @var array The web service url parameters, the
* parameters can be associated in pairs or by given index. The parameters are
* specified in the UrabeSettings
*/
public $url_params;
/**
     * @var object The web service body, expected to be a decoded JSON object.
     * When the request method is GET the body is NULL
*/
public $body;
/**
* @var object Use this variable to insert extra data needed when executing a service operation
*/
public $extra;
/**
* @var string The web service request method
*/
public $method;
/**
* @var array The properties names contained in the body
*/
private $property_names_cache;
/**
* __construct
*
* Initialize a new instance of the web service content
*/
public function __construct()
{
$this->get_variables = array();
//GET Variables
foreach ($_GET as $key => $value)
$this->get_variables[$key] = $value;
//URL parameters
if (isset($_SERVER['PATH_INFO']))
$this->url_params = explode('/', trim($_SERVER['PATH_INFO'], '/'));
else
$this->url_params = array();
//POST content, must be a JSON string
$this->body = file_get_contents('php://input');
$this->body = json_decode($this->body);
//The Request method
$this->method = $_SERVER['REQUEST_METHOD'];
//Initialize $extra as an empty object
$this->extra = (object)array();
}
/**
     * This function checks if the given variable name is defined in the current
     * web service GET variables
     *
     * @param string $var_name The variable name
     * @return bool Returns true when the variable is defined
*/
public function in_GET_variables($var_name)
{
return in_array($var_name, array_keys($this->get_variables));
}
/**
     * This function checks if the given variable name is defined in the current
     * web service GET variables and if the variable value equals the given value
     *
     * @param string $var_name The variable name
     * @param mixed $value The value to compare
     * @return bool Returns true when the variable is defined and equals the given value
*/
public function GET_variable_equals($var_name, $value)
{
return $this->in_GET_variables($var_name) && $this->get_variables[$var_name] == $value;
}
/**
     * This function picks the GET variable values by name and returns them in an array;
     * if a value to pick is not in the GET variables it throws an exception
     *
     * @param array $var_names The variable names whose values are picked
* @return array The picked values in the given variable names order
*/
public function pick_GET_variable(...$var_names)
{
$values = array();
$keys = array_keys($this->get_variables);
foreach ($var_names as $var_name) {
if (in_array($var_name, $keys))
array_push($values, $this->get_variables[$var_name]);
else
throw new Exception(sprintf(ERR_INCOMPLETE_DATA, CAP_GET_VARS, "'" . implode("', '", $var_names) . "'"));
}
return $values;
}
/**
* If the request method is GET the filter is extracted from the GET Variables
     * otherwise it is searched for in the body
*
* @return mixed filter value
*/
public function get_filter()
{
if ($this->method == 'GET' && $this->in_GET_variables('filter'))
return $this->get_variables['filter'];
else if (isset($this->body) && property_exists($this->body, 'filter'))
return $this->body->filter;
else
return null;
}
/**
* Gets the GET variables names as a string array
*
* @return array The array of GET variables names
*/
public function get_variables_names()
{
return array_keys($this->get_variables);
}
/**
* Validates if the passed variables names are contained in the web service content.
* As the fields are considered obligatory, they must appear in the GET variables
* otherwise an exception will be thrown.
     * @param array $variables The names of the obligatory GET variables
* @throws Exception Throws an Exception if any of the variables are not presented in the GET variables
* @return boolean True if all variables names are defined in GET variables
*/
public function validate_obligatory_GET_variables(...$variables)
{
$obl_variables_count = 0;
for ($i = 0; $i < count($variables); $i++)
if ($this->in_GET_variables($variables[$i]))
$obl_variables_count++;
if (count($variables) == $obl_variables_count)
return true;
else
throw new Exception(sprintf(ERR_INCOMPLETE_DATA, CAP_GET_VARS, "'" . implode("', '", $variables) . "'"));
}
/**
* Validates if the passed properties names are contained in the body of the web service content.
* As the fields are considered obligatory, they must appear in the body
* otherwise an exception will be thrown.
     * @param array $properties The names of the obligatory body properties
* @throws Exception Throws an Exception if any of the properties are not presented in the body
* @return boolean True if all properties names are defined in body
*/
public function validate_obligatory_body_properties(...$properties)
{
$obl_properties_count = 0;
for ($i = 0; $i < count($properties); $i++)
if ($this->in_body($properties[$i]))
$obl_properties_count++;
if (count($properties) == $obl_properties_count)
return true;
else
throw new Exception(sprintf(ERR_INCOMPLETE_BODY, "'" . implode("', '", $properties) . "'"));
}
/**
     * This function checks if the given property name is defined in the current
     * web service body
     *
     * @param string $property_name The property name
     * @return bool Returns true when the body property is present
*/
public function in_body($property_name)
{
if (is_null($this->body))
return false;
if (is_null($this->property_names_cache))
$this->property_names_cache = array_keys(get_object_vars($this->body));
$result = in_array($property_name, $this->property_names_cache);
return $result;
}
/**
* Builds a simple condition using a column name and comparing to a given value.
     * The value is extracted from the GET variables when the GET verb is used and from
     * the body for other verbs
*
* @param string $column_name The column name
* @return array One record array with key value pair value, column_name => condition_value
*/
public function build_simple_condition($column_name)
{
$result = array();
if ($column_name == null)
$result = null;
else if ($this->method == 'GET' && $this->in_GET_variables(NODE_CONDITION))
$result["$column_name"] = $this->get_variables[NODE_CONDITION];
else if ($this->in_body(NODE_CONDITION))
$result[$column_name] = $this->body->{NODE_CONDITION};
else
$result = null;
return $result;
}
/**
     * This method throws an exception when the action is
     * accessed by a method that is not in the allowed list.
*
* @param array ...$allowed_methods A string array containing the allowed methods
* @return void
*/
public function restrict_by_content(...$allowed_methods)
{
if (!in_array($this->method, $allowed_methods))
throw new Exception(sprintf(ERR_SERVICE_RESTRICTED, $this->method));
}
}
?><file_sep>/src/HasamiUtils.php
<?php
/******************************************
********** Connection utils **************
*****************************************/
/**
* Saves the connection data extracted from a
* KanojoX Object
*
* @param string $file_path The path where the file is going to be saved
* @param KanojoX $kanojo The Kanojo connection object
 * @throws Exception An Exception is thrown if there is an error creating the file
* @return void
*/
function save_connection($file_path, $kanojo)
{
$data = array(
"connection" =>
array(
"host" => $kanojo->host,
"user_name" => $kanojo->user_name,
"password" => $<PASSWORD>,
"port" => $kanojo->port,
"db_name" => $kanojo->db_name
),
"driver" => DBDriver::getName($kanojo->db_driver)
);
if ($kanojo->db_driver == DBDriver::ORACLE)
$data["owner"] = $kanojo->owner;
else if ($kanojo->db_driver == DBDriver::PG)
$data["schema"] = $kanojo->schema;
$dir = dirname($file_path);
if (!file_exists($dir))
mkdir($dir, 0755);
if (file_put_contents($file_path, json_encode($data, JSON_PRETTY_PRINT)) == false)
throw new Exception(ERR_SAVING_JSON);
}
/**
* Reads a connection file and returns the database connector object as KanojoX Class
*
* @param string $file_path The path where the file is located
 * @throws Exception An Exception is thrown if there is a problem reading the file
* @return KanojoX The connection object
*/
function get_KanojoX_from_file($file_path)
{
$kanojoObj = open_json_file($file_path);
$driver = $kanojoObj->driver;
if ($driver == "ORACLE") {
$kanojo = new ORACLEKanojoX();
$kanojo->owner = $kanojoObj->owner;
} else if ($driver == "PG") {
$kanojo = new PGKanojoX();
$kanojo->schema = $kanojoObj->schema;
} else if ($driver == "MYSQL")
$kanojo = new MYSQLKanojoX();
else
throw new Exception("Driver " + (isset($driver) ? $driver . "not supported." : " not valid."));
$kanojo->init($kanojoObj->connection);
return $kanojo;
}
/**
 * Gets the table definition from a table name using the given connector.
*
* @param KanojoX $connector The database connector
* @param string $table_name The name of the table, without schema or owner
* @return array The table definition column array
*/
function get_table_definition($connector, $table_name)
{
$connector->connect();
$parser = new MysteriousParser($connector->get_table_definition_parser());
$connector->parser = $parser;
$parser->parse_method = "parse_table_field_definition";
$parser->column_map = $connector->get_table_definition_mapper();
$sql = $connector->get_table_definition_query($table_name);
$result = $connector->fetch_assoc($sql, null);
return $result;
}
/**
 * Loads the table definition from its stored JSON file
 *
 * @param string $table_name The name of the table
* @return array The table definition column array
*/
function load_table_definition($table_name)
{
$file_path = KanojoX::$settings->table_definitions_path . "$table_name.json";
if (file_exists($file_path)) {
$json = open_json_file($file_path);
$fields = array();
foreach ($json->columns as $column_name => $field_data)
$fields[$column_name] = FieldDefinition::create($field_data);
return $fields;
    } else
        throw new Exception(sprintf(ERR_READING_JSON_FILE, $file_path));
}
/**
* Saves the table definition in a JSON file
*
* @param string $table_name The table name
* @param DBDriver $driver The database driver
* @param string $content The table definition content
 * @throws Exception An Exception is thrown if there is an error saving the file
* @return void
*/
function save_table_definition($table_name, $driver, $content)
{
$file_path = KanojoX::$settings->table_definitions_path . "$table_name.json";
$data = array("table_name" => $table_name, "driver" => DBDriver::getName($driver), "columns" => $content);
if (file_put_contents($file_path, json_encode($data, JSON_PRETTY_PRINT)) == false)
throw new Exception(ERR_SAVING_JSON);
}
/**
* Check if a file of a table definition exists
*
* @param string $table_name The table name
* @return boolean True if the table definition file exists
*/
function table_definition_exists($table_name)
{
$file_path = KanojoX::$settings->table_definitions_path . "$table_name.json";
return file_exists($file_path);
}
/**
* Gets the table definition store path
*
* @return string The table definition store path
*/
function get_table_definition_store_path()
{
if (is_null(KanojoX::$settings))
KanojoX::$settings = require "UrabeSettings.php";
return KanojoX::$settings->table_definitions_path;
}
/*************************************
************ File utils *************
*************************************/
/**
* Creates a JSON object from a JSON file
*
* @param string $file_path The JSON file path
 * @throws Exception An Exception is thrown if there is an error reading the file
* @return object The JSON Object
*/
function open_json_file($file_path)
{
if (file_exists($file_path)) {
$file_string = file_get_contents($file_path);
        //Remove comments and blank lines
$file_string = preg_replace('!/\*.*?\*/!s', '', $file_string);
$file_string = preg_replace('/(\/\/).*/', '', $file_string);
$file_string = preg_replace('/\n\s*\n/', "\n", $file_string);
//Encode as UTF8
$file_string = utf8_encode($file_string);
$json_object = json_decode($file_string);
if (is_null($json_object))
throw new Exception(sprintf(ERR_READING_JSON_FILE, $file_path));
else
return $json_object;
} else
throw new Exception(sprintf(ERR_READING_JSON_FILE, $file_path));
}
// /******************************************
// ************ Default queries *************
// *****************************************/
// /**
// * Select all fields from the table that matches the condition
// * where the primary key is equals to value.
// *
// * @param HasamiWrapper $service The web service wrapper
// * @param mixed $value The value to match in the condition
// * @param boolean $encode True if the output is encoded as a JSON string
// * @return QueryResult|string The query result or the JSON string
// */
// function select_by_primary_key($service, $value, $encode = false)
// {
// try {
// $query = "SELECT * FROM %s WHERE %s = $value";
// $query = sprintf($query, $service->table_name, $service->primary_key);
// $response = $service->connector->select($query, $service->parser, $encode);
// } catch (Exception $e) {
// $response = get_error_response($e, $encode);
// }
// return $response;
// }
// /**
// * Select all fields from the table.
// *
// * @param HasamiWrapper $service The web service wrapper
// * @param boolean $encode True if the output is encoded as a JSON string
// * @return QueryResult|string The query result or the JSON string
// */
// function select_all($service, $encode = false)
// {
// try {
// $query = "SELECT * FROM %s";
// $query = sprintf($query, $service->table_name);
// $response = $service->connector->select($query, $service->parser, $encode);
// } catch (Exception $e) {
// $response = get_error_response($e, $encode);
// }
// return $response;
// }
/******************************************
********* HTTP Response Result ***********
*****************************************/
/**
* Creates an error response from an exception error
*
* @param Exception $e The exception error
* @param string $query The query that raises the error.
* @param boolean $encode True if the output is encoded as a JSON string
* @return QueryResult|string The error response as QueryResult or a JSON string
*/
function get_error_response($e, $query = "", $encode = false)
{
$response = new QueryResult();
$response->query_result = false;
$response->query = $query;
$response->error = $e->getMessage();
if ($encode)
$response = json_encode($response);
return $response;
}
/************************************
********* Misc functions ***********
************************************/
/**
 * Creates a pretty JSON print from a JSON object, using the given pretty
 * print style.
*
* @param stdClass $json The JSON data to format
* @param JsonPrettyStyle $style The JSON pretty format
* @param bool $bg_black True if a dark background is applied otherwise the background will be white
* @return string The response encoded as a pretty HTML
*/
function pretty_print_format($json, $style, $bg_black = true)
{
$bg_color = $bg_black ? '#394034' : '#B1D9D2';
$html = "";
$html .= '<html><head>' .
'<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">' .
'<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>' .
'<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>' .
'<style>' .
'body { background-color: ' . $bg_color . '} ' .
//'div { padding:0; margin:0; display:inline-block; float:left; }' .
'</style>' .
'</head>' .
'<body>';
$format = new JsonPrettyPrint($style);
$html .= $format->get_format($json);
$html .= '</body></html>';
return $html;
}
/**
* From the current request body create a JSON object
*
* @return stdClass The JSON body
*/
function get_body_as_json()
{
$body = file_get_contents('php://input');
$body = json_decode($body);
return $body;
}
/**
 * Removes from the base array all elements whose keys are contained in the given array of keys.
* @param array $base_array The array to modify
* @param array $array_keys The array keys to be removed.
* @return void
*/
function array_remove(&$base_array, $array_keys)
{
foreach ($array_keys as &$key)
unset($base_array[$key]);
}
<file_sep>/docs/KanojoXDoc.md
# Introduction
`KanojoX` acts as the **Urabe** core; it wraps the most common database functions and unifies them, allowing us to work transparently between databases without changing our code.
The `KanojoX` class is an abstract class that must be inherited by each supported driver; currently the application is compatible with **ORACLE**, **PG** and **MYSQL**. The connector is composed of a group of connection variables, a module with basic database functionality, a result parser, a database model mapper and an error handler.
## Connection variables
To create a connection, each `KanojoX` driver implements a different connection method that uses all or some of the following variables.
| Access | Var | Data type | Description |
| -------- | ---- | ----------- |--------|
| public | **$host** | _string_ | Can be either a host name or an IP address |
| public | **$port** | _int_ | The connection port |
| public | **$db_name** | _string_ | The database name |
| public | **$user_name** | _string_ | The connection user name |
| public | **$password** | _string_ | The connection password |
These variables can be initialized by calling the `KanojoX::init` method. This method receives a decoded JSON object
```php
void public function init($body_json);
```
### init() function description
| Name | Data type | Description |
| - | - | - |
| **`$body_json`** | **object** | The request body as decoded JSON object |
**Warnings:** An Exception is thrown if the `$body_json` parameter is null
**returns:** _void_
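For example, a connection body for the `PGKanojoX` driver could look like the following sketch; the host, port, credentials and database name are placeholder values, not real connection data.
```php
//A minimal sketch: decode a connection body and initialize the connector.
//The property names follow the connection variables table above; the values are placeholders.
$body_json = json_decode('{
    "host": "127.0.0.1",
    "port": 5432,
    "db_name": "my_database",
    "user_name": "my_user",
    "password": "my_password"
}');
$kanojo = new PGKanojoX();
$kanojo->init($body_json);
```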
## Content
The content of the class is divided in the following links click any topic for details.
1. [Database basic functionality](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality)
2. [Result parser](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-result-parser)
3. [Database model mapper](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-model)
4. [Error and Exception Handling](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-error-and-exeception-handling)
## Database basic functionality
`KanojoX` defines the connector basic functionality, the connector wraps the driver connection actions and database manipulation.
### Database basic functionality Content
- [connect](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#connect)
- [close](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#close)
- [free_result](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#free_result)
- [execute](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#execute)
- [fetch_assoc](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#fetch_assoc)
- [error](https://github.com/ANamelessWolf/urabe/wiki/KanojoX-Class,-database-functionality#error)
### connect()
Initializes a database connection using the default connection settings. This method expects the connection variables to be already loaded; to ensure all properties are loaded it is recommended to initialize them with the `KanojoX::init` method. Once the variables are initialized this method opens a database connection and returns the connection object or the connection resource.
```php
object|resource|ConnectionError public function connect();
```
#### connect() function description
| Name | Data type | Description |
| -------- | - | - |
| **NONE** | - | - |
**Example:** Connect to ORACLE
```php
//1: A new instance of KanojoX of type ORACLEKanojoX
$kanojo = new ORACLEKanojoX();
$kanojo->owner = $body->owner;
//2: Initialize the connection before calling the connect method
$kanojo->init($body->connection);
//3: Connect to the Database
$conn = $kanojo->connect();
```
**return:** The connection object or a `ConnectionError` if the connection data is not valid to open the connection.
**Note:** Once connected, the current connection can be accessed via the connection property `KanojoX->connection`.
### close()
This method closes the current connection and calls the `free_result()` method; it is recommended to use it only when multiple connections exist and the connection needs to be closed before the object is destructed. By default the `__destruct` method executes this method; if the object was already closed, `__destruct` does nothing.
```php
bool public function close();
```
#### close() function description
| Name | Data type | Description |
| -------- | - | - |
| **NONE** | - | - |
**Example:** Close the connection to ORACLE
```php
$is_closed = $kanojo->close();
```
**return:** True if the connection is closed otherwise false.
**Warnings:** An Exception is thrown if this method is called when no connection to the database is available.
### free_result()
Frees stored result memory for the given statements. Methods like `fetch_assoc()` and `execute()` create stored result memory.
```php
void public free_result();
```
#### free_result() function description
| Name | Data type | Description |
| -------- | - | - |
| **NONE** | - | - |
**Example:** Free stored result memory
```php
$is_closed = $kanojo->free_result();
```
**return:** _void_
### execute()
Executes a prepared statement with or without parameters, waits for the result and returns it as a web service response of type `UrabeResponse`.
```php
UrabeResponse public execute($sql, $variables = null);
```
#### execute() function description
| Name | Data type | Description |
| ------- | ------ | - |
| **`sql`** | **string** | The SQL Statement to execute |
| _`$variables`_ | **mixed[]** | The colon-prefixed bind variables placeholder values used in the statement in order. |
**Example:** Inserts a new row
```php
$kanojo = new ORACLEKanojoX();
$result = $kanojo->execute("INSERT INTO TABLE_NAME(ID, COLUMN_NAME) VALUES(?,?)",
array(1, "data"));
echo json_encode($result);
```
**return:** The query result as a web service response
The echo output:
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
"query": {
"sql": "INSERT INTO TABLE_NAME(ID, COLUMN_NAME) VALUES(?,?)",
"parameters": [
1,
"data"
]
}
}
```
### fetch_assoc()
Executes a selection query with or without placeholders, waits for the result and returns an associative array containing the resulting set. Each row entry is parsed using the `KanojoX->parser` property; by default the parser returns its values as associative pairs `column_name => column_value`.
```php
array public fetch_assoc($sql, $variables = null);
```
#### fetch_assoc() function description
| Name | Data type | Description |
| ------- | ------ | - |
| **`sql`** | **string** | The SQL Statement to execute |
| _`$variables`_ | **mixed[]** | The colon-prefixed bind variables placeholder values used in the statement in order. |
**Example:** Selects some data
```php
$kanojo = new ORACLEKanojoX();
$result = $kanojo->fetch_assoc("SELECT * FROM USER");
echo json_encode($result);
```
**return:** The selected data with the parser format
```json
[
{
"id": "1",
"u_name": "user1",
"u_pass": "<PASSWORD>"
},
{
"id": "2",
"u_name": "user2",
"u_pass": "<PASSWORD>"
}
]
```
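A parameterized selection follows the same pattern. The sketch below uses the generic `?` placeholder returned by `KanojoX::get_param_place_holder`; note that the exact placeholder format may vary per driver:
```php
$kanojo = new ORACLEKanojoX();
//init() and connect() as shown in the connect() example
$result = $kanojo->fetch_assoc("SELECT * FROM USER WHERE id = ?", array(1));
echo json_encode($result);
```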
### error()
Gets the last error message caused by the current connection or a prepared statement. The message is formatted using fields that are configurable in the application settings. This function is used internally but has to be implemented in each connection driver.
To retrieve the last error, call the method `KanojoX->get_last_error()`.
#### error() function description
| Name | Data type | Description |
| ------- | ------ | - |
| **`sql`** | **string** | The last executed SQL statement, can be null |
| _`$error`_ | **ConnectionError** | The connection error that cause this error |
```php
ConnectionError public error($sql, $error= null);
```
**Example:** Print some error
```php
trigger_error('Trigger Error', E_USER_WARNING);
return $kanojo->get_last_error();
```
**return:** The connection error
The output result
```json
{
"code": 512,
"message": "Trigger Error",
"sql": null,
"file": "C:\\xampp\\htdocs\\urabe\\testing\\utils\\KanojoXTestUtils.php",
"line": 106
}
```
## Result parser
`KanojoX` defines a property of type `MysteriousParser` that allows the fetched values to be returned as defined by the `MysteriousParser` callback.
| Access | Var | Data type | Description |
| -------- | ---- | ----------- |--------|
| public | **$parser** | _MysteriousParser_ | Defines the way the result data is parsed |
As soon as the class is constructed, a default parser is defined:
```php
public function __construct() {
$this->parser = new MysteriousParser();
}
```
The parser can be changed at any moment before a `fetch_assoc` function call. If you look at the body of any implemented `fetch_assoc` you will notice the following pattern.
For example, consider the code of `PGKanojoX->fetch_assoc`:
```php
$rows = array(); //Here the selected rows are stored
...
while ($row = pg_fetch_assoc($ok))
$this->parser->parse($rows, $row);
...
return $rows;
```
As you can see, the parser receives the fetched `$row`, parses its values with the method `MysteriousParser->parse` and stores the parsed result in the row collection `$rows`.
### MysteriousParser Class
This class defines a parser used during the associative fetch of an SQL selection query result. The class contemplates two ways to parse data: with a table definition or with a special customization.
Let us call these parser methods:
- The Table definition method
- The Special customization method
Optionally, while parsing, the column names can be renamed using a column map array that defines the column name mapping. The mapped values are passed as key-value pairs, where the key is the database column name and the value is the desired name.
#### MysteriousParser properties
| Access | Var | Data type | Description |
| -------- | ---- | ----------- |--------|
| public | **$table_definition** | _FieldDefinition[]_ | The table columns definitions as an array of `FieldDefinition`. |
| public | **$column_map** | _string[]_ | Defines how the columns are mapped to the message response; if null the columns maintain the database column names. These values are case sensitive |
| public | **$parse_method** | _callback_ | The parse method, defined as an anonymous function |
#### Initializing the parser
The constructor creates one of two default parse methods, depending on the constructor input parameters.
##### __construct()
| Name | Data type | Description |
| ------- | ------ | - |
| _`$table_definition`_ | **FieldDefinition[]** | The table fields definition. |
```php
public function __construct($table_definition = null)
```
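For illustration, a minimal sketch of both construction modes. The field definition classes and `PARSE_AS_*` constants used here are the ones shipped with the library; the exact shape expected for the definition array is an assumption based on the property table above:
```php
//Special customization method: no table definition, rows are stored as fetched
$custom_parser = new MysteriousParser();

//Table definition method: only the defined columns are parsed
$table_definition = array(
    new NumericFieldDefinition(1, "id", PARSE_AS_INT, 32, 0),
    new StringFieldDefinition(2, "u_name", PARSE_AS_STRING, 45)
);
$typed_parser = new MysteriousParser($table_definition);
```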
#### Table definition method
When a table definition is provided to the constructor, the fetched data is parsed using the `parse_with_field_definition` function.
```php
//Table definition method
public function __construct($table_definition = null){
...
$this->parse_method = function ($parser, &$result, $row) {
$this->parse_with_field_definition($result, $row);
};
...
}
```
The table definition method parses only the columns defined in the table definition array and maps the column names if `column_map` is available.
##### parse_with_field_definition() function description
| Name | Data type | Description |
| ------- | ------ | - |
| _`$result`_ | **array[]** | The collection of rows where the parsed rows are stored |
| _`$row`_ | **mixed[]** | The selected row picked from the fetch assoc process. |
```php
void public function parse_with_field_definition(&$result, $row);
```
**Example:**
We have the following table definition
```json
[
{
"column_index": 1,
"column_name": "id",
"data_type": "integer",
"char_max_length": null,
"numeric_precision": 32,
"numeric_scale": 0
},
{
"column_index": 2,
"column_name": "u_name",
"data_type": "character varying",
"char_max_length": 45,
"numeric_precision": null,
"numeric_scale": null
}
]
```
While fetching we select the following row
```php
$row = array("id"=>115, "u_name"=>"Mike", "pass"=>"<PASSWORD>", "register_year"=>2018);
```
With no mapping, the parsed row will be:
```php
$row = array("id"=>115, "u_name"=>"Mike");
```
Defining the following column mapping, the result will be:
```php
//Define the mapping updating the Mysterious parser column_map property
$parser->column_map = array("id"=>"user_id", "u_name"=>"user_name");
...
$row = array("user_id"=>115, "user_name"=>"Mike");
```
#### The Special customization method
When no table definition is provided to the constructor, the fetched data is parsed using the callback stored in the property `$parse_method`.
The callback `MysteriousParser->parse_method` has the following body:
| Name | Data type | Description |
| ------- | ------ | - |
| _`$parser`_ | **MysteriousParser** | Reference the parser instance, that is executing the parsing callback |
| _`&$result`_ | **array[]** | The row collection, where the parsed rows are stored |
| _`$row`_ | **mixed[]** | The selected row picked from the fetch assoc process. |
```php
function ($parser, &$result, $row);
```
By default the constructor defines the following parser:
```php
$this->parse_method = function ($parser, &$result, $row) {
    array_push($result, $row);
};
```
**Note:** Column mapping is only available in a custom parser, and is accessed via `$parser->column_map`. A simple example of using a custom parser with column mapping:
```php
$parser = new MysteriousParser();
$parser->column_map = array("id" => "user_id", "u_name" => "user_name");
$parser->parse_method = function ($parser, &$result, $row) {
    $mapping = $parser->column_map;
    $mapped_row = array();
    foreach ($row as $column_name => $column_value) {
        $key = $mapping[$column_name];
        $mapped_row[$key] = $column_value;
    }
    array_push($result, $mapped_row);
};
```
## Database model mapper
The connector implements the functionality to automatically select the table definition, as implemented by each connection driver. The following methods are used internally and are defined in each `KanojoX` connection driver.
### get_table_definition_query()
Gets the selection query for selecting the table definition
| Name | Data type | Description |
| -------- | - | - |
| **`$table_name`** | **string** | The table name |
```php
string public function get_table_definition_query($table_name);
```
**Example:** The returned selection query for the table `users` in ORACLE is:
```sql
SELECT 'COLUMN_ID', 'COLUMN_NAME', 'DATA_TYPE',
'CHAR_LENGTH', 'DATA_PRECISION', 'DATA_SCALE'
FROM ALL_TAB_COLS
WHERE TABLE_NAME = 'users' AND OWNER = 'public'
```
**return:** The selection query
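Although this method is mainly used internally (for example by `Urabe->get_table_definition`), it can be combined with `fetch_assoc` to retrieve the definition manually; a minimal sketch:
```php
$sql = $kanojo->get_table_definition_query("users");
$definition_rows = $kanojo->fetch_assoc($sql);
echo json_encode($definition_rows);
```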
### get_table_definition_parser()
Gets the table definition parser for the database connector; the column names are mapped, and the mapped column names are defined in `Warai.php`.
| Name | Data type | Description |
| -------- | - | - |
| **NONE** | - | - |
```php
FieldDefinition[] public function get_table_definition_parser();
```
**return:** The array of field definition
A sample of the JSON-encoded output:
```json
[
{
"column_index": 1,
"column_name": "id",
"data_type": "integer",
"char_max_length": null,
"numeric_precision": 32,
"numeric_scale": 0
},
{
"column_index": 2,
"column_name": "u_name",
"data_type": "character varying",
"char_max_length": 45,
"numeric_precision": null,
"numeric_scale": null
}
]
```
### get_table_definition_mapper()
Gets the table definition mapper for the database connector and returns an associative array.
| Name | Data type | Description |
| -------- | - | - |
| **NONE** | - | - |
```php
string[] public function get_table_definition_mapper()
```
**Example:** Default column mapping for `ORACLEKanojoX` class.
```php
$map = array(
ORACLE_FIELD_COL_ORDER => TAB_DEF_INDEX,
ORACLE_FIELD_COL_NAME => TAB_DEF_NAME,
ORACLE_FIELD_DATA_TP => TAB_DEF_TYPE,
ORACLE_FIELD_CHAR_LENGTH => TAB_DEF_CHAR_LENGTH,
ORACLE_FIELD_NUM_PRECISION => TAB_DEF_NUM_PRECISION,
ORACLE_FIELD_NUM_SCALE => TAB_DEF_NUM_SCALE
);
return $map;
```
## Error and Exception Handling
`Urabe` manages errors and exceptions using `KanojoX`; unhandled exceptions and non-fatal errors are returned as a web service response. Error and exception handling are configured in the configuration file, [UrabeSettings.php](https://github.com/ANamelessWolf/urabe/blob/master/src/UrabeSettings.php).
### Configuration settings
The following settings configure the web service response format. To modify the current configuration, the settings can be accessed from the static property `KanojoX::$settings`; a short sketch for changing them at run time is shown after the table.
| Settings | Type | Default | Description |
| -------- | ------------ | -------- | ------------ |
| **handle_errors** | _bool_ | `true` | If set to true `Urabe` handles errors as defined in the `KanojoX` Class |
| **handle_exceptions** | _bool_ | `true` | If set to true `Urabe` handles exceptions as defined in the `KanojoX` Class |
| **show_error_details** | _bool_ | `false` | If set to true and `Urabe` handles exceptions, the error details such as file, line, error code and context are shown in the response |
| **show_error_context** | _bool_ | `false` | If set to true and `Urabe` handles exceptions, the error context is shown in the response |
| **enable_stack_trace** | _bool_ | `false` | If set to true and `Urabe` handles exceptions, the stack trace will be added to the response |
| **add_query_to_response** | _bool_ | `true` | If set to true `Urabe` adds the last executed SQL statement to the response |
| **hide_exception_error** | _bool_ | `false` | If set to true `Urabe` hides the exception error code from the service response |
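As referenced above, a minimal sketch for adjusting these settings at run time (intended for local debugging only):
```php
//KanojoX::start_urabe() loads the settings object when a connector is constructed
KanojoX::$settings->show_error_details = true;
KanojoX::$settings->enable_stack_trace = true;
KanojoX::$settings->add_query_to_response = true;
```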
### Error Handler
The `KanojoX` function `error_handler` is used to handle errors at run time; it catches non-fatal errors or errors triggered under certain conditions ([trigger_error()](https://php.net/manual/en/function.trigger-error.php)). By default the errors are saved as `ConnectionError` objects and stored in the `KanojoX::$errors` static property.
To access the last error:
```php
ConnectionError public function get_last_error();
```
Accessing the error from any class
```php
$err_index = sizeof(KanojoX::$errors) - 1; //Selecting last error
$error = KanojoX::$errors[$err_index];
```
**Example:** Triggering and retrieving the last executed error.
```php
trigger_error('Trigger Error', E_USER_WARNING);
$error = $kanojo->get_last_error();
```
### Exception Handler
`KanojoX` treats an unhandled exception as a `400 HTTP RESPONSE`; when an exception is thrown in any part of the API, the method `KanojoX::exception_handler` is called. Allowing `KanojoX` to handle exceptions makes it easier to debug and manage errors in the view.
**Example:** This is a message response from an SQL exception with full details.
```json
{
"message": "Bad query: ORA-00942: table or view does not exist",
"result": [],
"size": 0,
"error": {
"query": "SELECT * FROM table_name WHERE id = 1",
"code": 942,
"file": "C:\\xampp\\htdocs\\urabe\\src\\ORACLEKanojoX.php",
"line": 177,
"err_context": [
{
"error": {
"message": "oci_execute(): ORA-00942: table or view does not exist",
"code": 2,
"file": "C:\\xampp\\htdocs\\urabe\\src\\ORACLEKanojoX.php",
"line": 171,
"err_context": {
"sql": "SELECT * FROM table_name WHERE id = 1",
"variables": null,
"rows": [],
"class": "oci8 statement"
}
}
}
]
},
"stack_trace": "#0 C:\\xampp\\htdocs\\urabe\\testing\\KanojoXTestUtils.php(29): ORACLEKanojoX->fetch_assoc('SELECT ADDR_ID,...')\n#1 C:\\xampp\\htdocs\\urabe\\testing\\KanojoXTester.php(28): test_fetch_assoc_no_params(Object(ORACLEKanojoX), Object(stdClass))\n#2 {main}"
}
```
**Example:** This is a message response from an SQL exception formatted for a staging environment.
```json
{
"message": "Bad query: ORA-00942: table or view does not exist",
"result": [],
"size": 0,
"error": null
}
```<file_sep>/src/KanojoX.php
<?php
include "UrabeSQLException.php";
include "HasamiUtils.php";
include "ConnectionError.php";
include "UrabeResponse.php";
include "MysteriousParser.php";
include "WebServiceContent.php";
require_once "resources/Warai.php";
/**
* Database connection model
*
* Kanojo means girlfriend in Japanese and this class saves the connection data structure used to connect to
* the database.
* @version 1.0.0
* @api Makoto Urabe DB Manager database connector
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
abstract class KanojoX
{
/**
* Defines how the data is parsed while the result is fetch associatively
*
* @var MysteriousParser The selection data parser
*/
public $parser;
/**
* @var array $error
* The application current errors
*/
public static $errors;
/**
* @var array $settings
* Access the application settings.
*/
public static $settings;
/**
* @var int The http error code
*/
public static $http_error_code;
/**
* @var DBDriver The database driver
*/
public $db_driver;
/**
* @var string $host Can be either a host name or an IP address.
*/
public $host = "127.0.0.1";
/**
* @var string $port Connection port
*/
public $port;
/**
* @var string $db_name The database name.
*/
public $db_name;
/**
* @var string $user_name The database connection user name.
*/
public $user_name;
/**
* @var string $password The connection password
*/
public $password = "";
/**
* Returns the collections of statement handled
*
* @var array statementsIds The statements ids collection
*/
public $statementsIds;
/**
* @var resource $connection
* The connection object.
*/
public $connection;
/**
* Returns the number of affected rows
*
* @var int The number of affected rows
*/
public $affected_rows;
/**
* This function initializes Urabe error handling, settings
* and exception handler. This method is called when an instance of KanojoX is created
*
* @return void
*/
public static function start_urabe()
{
KanojoX::$errors = array();
KanojoX::$settings = require "UrabeSettings.php";
if (KanojoX::$settings->handle_errors)
set_error_handler('KanojoX::error_handler');
if (KanojoX::$settings->handle_exceptions)
set_exception_handler('KanojoX::exception_handler');
}
/**
* Initialize a new instance of the connection object
* @param MysteriousParser $parser Defines how the data is going to be parsed if,
* null the data is parsed associatively column value
*/
public function __construct($parser = null)
{
$this->statementsIds = array();
if (is_null($parser))
$this->parser = new MysteriousParser();
else
$this->parser = $parser;
KanojoX::start_urabe();
}
/**
* Destructs the Kanojo instance and tries to close the connection and free memory if
* it is connected and has statement ids.
*/
function __destruct()
{
if ($this->connection)
$this->close();
}
/**
* Initialize the class with a JSON object
*
* @param object $body_json The request body as JSON object
* @throws Exception An Exception is raised when the body is null or missed one or more of the
* following variables: host, user_name, password, port, db_name
* @return void
*/
public function init($body_json)
{
$fields = array("host", "user_name", "password", "port", "db_name");
if (isset($body_json)) {
foreach ($fields as &$value) {
if (isset($body_json->{$value}))
$this->{$value} = $body_json->{$value};
else
throw new Exception(sprintf(ERR_INCOMPLETE_BODY, "initialize", join(', ', $fields)));
}
} else
throw new Exception(ERR_BODY_IS_NULL);
}
/**
* Gets the last executed error
*
* @return ConnectionError The last executed error
*/
public function get_last_error()
{
$errors = KanojoX::$errors;
$index = sizeof($errors) - 1;
return $index >= 0 ? $errors[$index] : null;
}
/**
* Handles application errors
*
* @param int $err_no Contains the level of the error raised, as an integer.
* @param string $err_msg The error message, as a string.
* @param string $err_file The filename that the error was raised in, as a string
* @param int $err_line The line number the error was raised at, as an integer
* @param array $err_context an array that points to the active symbol table at the point the error occurred.
* In other words, err_context will contain an array of every variable that existed in the scope the error was triggered in.
* User error handler must not modify error context.
* @return void The error is appended to the KanojoX::$errors collection.
*/
public static function error_handler($err_no, $err_msg, $err_file, $err_line, $err_context)
{
$error = new ConnectionError();
$error->code = $err_no;
$error->message = $err_msg;
$error->file = $err_file;
$error->line = $err_line;
$error->set_err_context($err_context);
array_push(KanojoX::$errors, $error);
}
/**
* Handles application exceptions
*
* @param exception $exception The generated exception
* @return void
*/
public static function exception_handler($exception)
{
if (is_null(KanojoX::$http_error_code))
http_response_code(400);
else
http_response_code(KanojoX::$http_error_code);
$class = get_class($exception);
$error = new ConnectionError();
$error->code = $exception->getCode();
$error->message = $exception->getMessage();
$error->file = $exception->getFile();
$error->line = $exception->getLine();
if ($class == CLASS_SQL_EXC)
$error->sql = $exception->sql;
$response = new UrabeResponse();
$response->error = $error->get_exception_error();
$exc_response = $response->get_exception_response(
$exception->getMessage(),
KanojoX::$settings->enable_stack_trace ? $exception->getTraceAsString() : null
);
//If encoding fails means error context has resource objects that can not be encoded,
//in that case will try the simple exception response
$sql = $exc_response->error[NODE_QUERY];
$exc_response = json_encode($exc_response);
if (!$exc_response) {
$exc_response = $response->get_simple_exception_response(
$exception,
KanojoX::$settings->enable_stack_trace ? $exception->getTraceAsString() : null
);
if (KanojoX::$settings->add_query_to_response)
$exc_response->{NODE_SQL} = $sql;
$exc_response->{NODE_SUCCEED} = false;
$exc_response = json_encode($exc_response);
}
echo $exc_response;
}
/*********************
**** SQL Parsing ****
*********************/
/**
* Gets the placeholders format for the original prepared query string.
* The number of elements in the array must match the number of placeholders.
*
* @param int $index The place holder index if needed
* @return string The place holder at the given position
*/
public function get_param_place_holder($index = null)
{
return '?';
}
/************************
* Shared functionality *
************************/
/**
* Closes a connection
*
* @return bool Returns TRUE on success or FALSE on failure.
*/
abstract public function close();
/**
* Open a Database connection
*
* @return object The database connection object
*/
abstract public function connect();
/**
* Get the last error message string of a connection
*
* @param string|null $sql The last executed statement. Can be null
* @param ConnectionError $error If the error exists pass the error
* @return ConnectionError The connection error
*/
abstract public function error($sql, $error = null);
/**
* Sends a request to execute a prepared statement with given parameters,
* and waits for the result
*
* @param string $sql The SQL Statement
* @param array|null $variables The colon-prefixed bind variables placeholder used in the statement, can be null.
* @throws Exception This method is not implemented in the abstract class
* @return UrabeResponse Returns the service response formatted as an executed response
*/
abstract public function execute($sql, $variables = null);
/**
* Returns an associative array containing the next result-set row of a
* query. Each array entry corresponds to a column of the row.
* This function is typically called in a loop until it returns FALSE,
* indicating no more rows exist.
*
* @param string $sql The SQL Statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @return array Returns an associative array. If there are no more rows in the statement then the connection error is returned.
* */
abstract public function fetch_assoc($sql, $variables);
/**
* Frees the memory associated with a result
*
* @return void
*/
abstract public function free_result();
/**
* Gets the query for selecting the table definition
*
* @param string $table_name The table name
* @return string The table definition selection query
*/
abstract public function get_table_definition_query($table_name);
/**
* Gets the table definition parser for the database connector
*
* @return array The table definition fields as an array of FieldDefinition
*/
abstract function get_table_definition_parser();
/**
* Gets the table definition mapper for the database connector
*
* @return array The table mapper as KeyValued<String,String> array
*/
abstract function get_table_definition_mapper();
}
?><file_sep>/src/Urabe.php
<?php
include_once "ORACLEKanojoX.php";
include_once "PGKanojoX.php";
include_once "MYSQLKanojoX.php";
include_once "FieldDefinition.php";
/**
* A Database connection manager
*
* Urabe is the main protagonist in Nazo no Kanojo X; this class manages and wraps all transactions to the database.
* Given the Kanojo profile Urabe should be able to connect with ORACLE, PG and MySQL
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class Urabe
{
/**
* @var KanojoX $connector
* Defines the database connector
*/
private $connector;
/**
* Gets the current connection data
*
* @return Object current connection data
*/
public function get_connection_data()
{
return array(
"db_driver" => DBDriver::getName($this->connector->db_driver),
"host" => $this->connector->host,
"port" => $this->connector->port,
"db_name" => $this->connector->db_name,
"user_name" => $this->connector->user_name,
//"password" = $this->connector->password
);
}
/**
* @var bool $is_connected
* Check if there is an active connection to the database.
*/
public $is_connected;
/**
* Gets the current parser
*
* @return MysteriousParser The current parser
*/
public function get_parser()
{
return $this->connector->parser;
}
/**
* Sets the current parser
*
* @param MysteriousParser $parser The current parser
* @return void
*/
public function set_parser($mys_parser)
{
$this->connector->parser = $mys_parser;
}
/**
* __construct
*
* Initialize a new instance of the Urabe Database manager.
* The connection is opened in the constructor and should be closed using the close method.
* @param KanojoX $connector The database connector.
*/
public function __construct($connector)
{
if (isset($connector)) {
$this->connector = $connector;
$this->connector->connect();
if ($this->connector) {
$this->is_connected = true;
$this->database_name = $this->connector->db_name;
} else {
$this->is_connected = false;
$this->error = $this->connector;
}
} else
throw new Exception(ERR_BAD_CONNECTION);
}
/**
* Creates a cloned version of this instance, the connection is copied
* without the parser
*
* @return Urabe The instance clone
*/
public function get_clone()
{
$driver = $this->connector->db_driver;
if (DBDriver::MYSQL == $driver)
$connector = new MYSQLKanojoX();
else if (DBDriver::ORACLE == $driver) {
$connector = new ORACLEKanojoX();
$connector->owner = $this->connector->owner;
} else if (DBDriver::PG == $driver) {
$connector = new PGKanojoX();
$connector->schema = $this->connector->schema;
}
$connector->host = $this->connector->host;
$connector->port = $this->connector->port;
$connector->db_name = $this->connector->db_name;
$connector->user_name = $this->connector->user_name;
$connector->password = $this->password;
return new Urabe($connector);
}
/**
* Execute an SQL selection query and parse the data as defined in the parser.
* If the parser is null uses the parser defined in the connector object KanojoX::parser
*
* @param string $sql The SQL statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @param MysteriousParser $row_parser The row parser.
* @throws Exception An Exception is thrown if not connected to the database or if the SQL is not valid
* @return UrabeResponse The SQL selection result
*/
public function select($sql, $variables = null, $row_parser = null)
{
if ($this->is_connected) {
$response = new UrabeResponse();
//1: Select row parsing method
if (isset($row_parser)) //&& is_callable($row_parser->parse_method))
$this->connector->parser = $row_parser;
//2: Executes the query and fetches the rows as an associative array
$result = $this->connector->fetch_assoc($sql, $variables);
//3: Formats response
$result = $response->get_response(INF_SELECT, $result, $sql);
return $result;
} else
throw new Exception($this->connector->error);
}
/**
* Gets the first value found on the first row and first column.
* If no values are selected a default value is returned
*
* @param string $sql The SQL statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @param string $default_val The default value
* @return string The selected value taken from the first row and first column
*/
public function select_one($sql, $variables = null, $default_val = null)
{
$result = $this->connector->fetch_assoc($sql, $variables);
if (sizeof($result) > 0) {
$result = $result[0];
$columns = array_keys($result);
return sizeof($columns) > 0 ? strval($result[$columns[0]]) : $default_val;
} else
return $default_val;
}
/**
* Select all rows and returns just the values from the first selected column.
* Used to select a list of elements, not associatively
*
* @param string $sql The SQL statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @return array The first column values inside an array.
*/
public function select_items($sql, $variables = null)
{
$result = $this->connector->fetch_assoc($sql, $variables);
$values = array();
if (sizeof($result) > 0) {
$columns = array_keys($result[0]);
$sel_column = sizeof($columns) > 0 ? $columns[0] : null;
if (isset($sel_column))
for ($i = 0; $i < sizeof($result); $i++)
array_push($values, $result[$i][$sel_column]);
}
return $values;
}
/**
* Selects all rows from a given table name. Calling select_all() is identical to calling select() with
* $sql = SELECT * FROM table_name
*
* @param string $table_name The name of the table
* @param MysteriousParser $row_parser The row parser.
* @throws Exception An Exception is thrown if not connected to the database or if the SQL is not valid
* @return UrabeResponse The query result as a JSON String or a query result.
*/
public function select_all($table_name, $row_parser = null)
{
return $this->select(sprintf('SELECT * FROM %s', $table_name), null, $row_parser);
}
/**
* Gets the table definition
*
* @param string $table_name The name of the table
* @throws Exception An exception is thrown when the table doesn't exists.
* @return array The row definition of the table fields.
*/
public function get_table_definition($table_name)
{
if ((strpos($table_name, '.') !== false)) {
$tn = explode('.', $table_name);
$tn = $tn[1];
} else
$tn = $table_name;
return get_table_definition($this->connector, $tn);
}
/**
* Check if a table exists on the database
*
* @param string $table_name The name of the table
* @return bool The query result
*/
public function table_exists($table_name)
{
$result = $this->select($this->connector->get_table_definition_query($table_name), null, null);
return $result->size > 0;
}
/**
* This function is an alias of KanojoX::execute()
*
* @param string $sql The SQL statement
* @param array $variables The colon-prefixed bind variables placeholder used in the statement.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function query($sql, $variables = null)
{
return $this->connector->execute($sql, $variables);
}
/**
* Performs an insertion query into a table
*
* @param string $table_name The table name.
* @param object $values The values to insert as key value pair
* Column names as keys and insert values as associated value, place holders can not be identifiers only values.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function insert($table_name, $values)
{
$query_format = "INSERT INTO " . $table_name . " (%s) VALUES (%s)";
$columns = array();
$insert_values = array();
$params = array();
$index = 0;
//Build prepare statement
foreach ($values as $column => $value) {
array_push($columns, $column);
array_push($insert_values, $this->connector->get_param_place_holder(++$index));
array_push($params, $value);
}
$columns = implode(', ', $columns);
$insert_values = implode(', ', $insert_values);
$sql = sprintf($query_format, $columns, $insert_values);
$response = $this->query($sql, $params);
return $response;
}
/**
* Performs a bulk insertion query into a table
*
* @param string $table_name The table name.
* @param array $columns The columns as an array of strings
* @param array $values The values to insert as key value pair array.
* Column names as keys and insert values as associated value, place holders can not be identifiers only values.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function insert_bulk($table_name, $columns, $values)
{
$query_format = "INSERT INTO " . $table_name . " (%s) VALUES %s";
$value_format = "(%s)";
$insert_rows = array();
$params = array();
$index = 0;
//Build prepare statement
for ($i = 0; $i < sizeof($values); $i++) {
$insert_values = array();
for ($c = 0; $c < sizeof($columns); $c++) {
array_push($insert_values, $this->connector->get_param_place_holder(++$index));
array_push($params, $values[$i]->{$columns[$c]});
}
array_push($insert_rows, sprintf($value_format, implode(', ', $insert_values)));
}
$columns = implode(', ', $columns);
$insert_rows = implode(', ', $insert_rows);
$sql = sprintf($query_format, $columns, $insert_rows);
$response = $this->query($sql, $params);
return $response;
}
/**
* Performs an update query
*
* @param string $table_name The table name.
* @param array $values The values to update as key value pair array.
* Column names as keys and update values as associated value, place holders can not be identifiers only values.
* @param string $condition The condition to match, this condition should not use place holders.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function update($table_name, $values, $condition)
{
$query_format = "UPDATE $table_name SET %s WHERE %s";
$set_format = "%s = %s";
$update_values = array();
$params = array();
$index = 0;
//Build prepare statement
foreach ($values as $column => $value) {
array_push($update_values, sprintf($set_format, $column, $this->connector->get_param_place_holder(++$index)));
array_push($params, $value);
}
$update_values = implode(', ', $update_values);
$sql = sprintf($query_format, $update_values, $condition);
$response = $this->query($sql, $params);
return $response;
}
/**
* Performs an update query by defining a condition
* where the $column_name has to be equal to the given $column_value.
*
* @param string $table_name The table name.
* @param array $values The values to update as key value pair array.
* Column names as keys and update values as associated value, place holders can not be identifiers only values.
* @param string $column_name The column name used in the condition.
* @param string $column_value The column value used in the condition.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function update_by_field($table_name, $values, $column_name, $column_value)
{
$query_format = "UPDATE $table_name SET %s WHERE $column_name = %s";
$set_format = "%s = %s";
$update_values = array();
$params = array();
$index = 0;
//Build prepare statement
foreach ($values as $column => $value) {
array_push($update_values, sprintf($set_format, $column, $this->connector->get_param_place_holder(++$index)));
array_push($params, $value);
}
array_push($params, $column_value);
$update_values = implode(', ', $update_values);
$sql = sprintf($query_format, $update_values, $this->connector->get_param_place_holder(++$index));
$response = $this->query($sql, $params);
return $response;
}
/**
* Performs a deletion query using the given condition.
*
* @param string $table_name The table name.
* @param string $condition The condition to match, this condition should not use place holders.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function delete($table_name, $condition)
{
$sql = "DELETE FROM $table_name WHERE $condition";
return $this->query($sql);
}
/**
* Performs a deletion query by defining a condition
* where the $column_name has to be equal to the given $column_value.
*
* @param string $table_name The table name.
* @param string $column_name The column name used in the condition.
* @param string $column_value The column value used in the condition.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function delete_by_field($table_name, $column_name, $column_value)
{
$sql = "DELETE FROM $table_name WHERE $column_name = %s";
$sql = sprintf($sql, $this->connector->get_param_place_holder(1));
$variables = array($column_value);
return $this->query($sql, $variables);
}
/**
* Formats the bindable parameters place holders in to
* the current driver place holder format
*
* @param string $sql The sql statement
* @return string Returns the formatted sql statement
*/
public function format_sql_place_holders($sql)
{
$matches = array();
preg_match_all("/@\d+/", $sql, $matches);
$search = array();
$replace = array();
for ($i = 0; $i < sizeof($matches[0]); $i++)
if (!in_array($matches[0][$i], $search)) {
$index = intval(str_replace('@', '', $matches[0][$i]));
array_push($search, $matches[0][$i]);
array_push($replace, $this->connector->get_param_place_holder($index));
}
return str_replace($search, $replace, $sql);
}
/**
* Gets the database connector driver
*
* @return DBDriver The database driver
*/
public function get_driver()
{
return $this->connector->db_driver;
}
}
<file_sep>/src/resources/Warai.php
<?php
/**
* Defines application constants
*
* @package URABE-API
* @author <NAME> <<EMAIL>>
* @version v.1.1 (01/10/2019)
* @copyright copyright (c) 2018-2020, Nameless Studios
*/
require_once "WaraiMessages_en.php";
require_once "EnumErrorMessages_en.php";
/***************************************
*********** Class Names ***************
***************************************/
/**
* @var string CLASS_ERR
* The class names used for application errors
*/
const CLASS_ERR = 'ConnectionError';
/**
* @var string CLASS_SQL_EXC
* The class names used for sql exceptions
*/
const CLASS_SQL_EXC = 'UrabeSQLException';
/***************************************
************** CAPTIONS ***************
***************************************/
/** String caption for Delete */
const CAP_DELETE = 'delete';
/**
* @var string CAP_UPDATE
* String caption for Update.
*/
const CAP_UPDATE = 'update';
/**
* @var string CAP_INSERT
* String caption for Insert.
*/
const CAP_INSERT = 'insert';
/**
* @var string CAP_EXTRACT
* String caption for Extract.
*/
const CAP_EXTRACT = 'Extract';
/**
* @var string CAP_URABE_ACTION
* The method prefix name used to define the methods that can be called
* via a web service
*/
const CAP_URABE_ACTION = 'u_action_';
/**
* @var string VAR_URABE_ACTION
* GET variable name used to define a web service custom callback action
*/
const VAR_URABE_ACTION = 'uAction';
/**
* @var string CAP_GET_VARS
* String caption for Get variables
*/
const CAP_GET_VARS = 'GET variables';
/***************************************
************** JSON NODES *************
***************************************/
/**
* @var string NODE_RESULT
* The node name that saves the transaction result
*/
const NODE_RESULT = 'result';
/**
* @var string NODE_SIZE
* The node name that saves the result size
*/
const NODE_SIZE = 'size';
/**
* @var string NODE_MSG
* The node name to save the response message
*/
const NODE_MSG = 'message';
/**
* @var string NODE_QUERY
* The node name that saves the transaction query
*/
const NODE_QUERY = 'query';
/**
* @var string NODE_SQL
* The node name that saves the SQL statement
*/
const NODE_SQL = 'sql';
/**
* @var string NODE_ERROR
* The node name that saves the transaction error
*/
const NODE_ERROR = 'error';
/**
* @var string NODE_SUCCEED
* The node name that stores the query succeed status
*/
const NODE_SUCCEED = 'succeed';
/**
* @var string NODE_AFF_ROWS
* The node name that stores the number of affected rows
*/
const NODE_AFF_ROWS = 'affected_rows';
/**
* @var string NODE_QUERY_RESULT
* The node name that saves the transaction query result
*/
const NODE_QUERY_RESULT = 'query_result';
/**
* @var string NODE_ERROR_CONTEXT
* The node name that saves the error context
*/
const NODE_ERROR_CONTEXT = 'err_context';
/**
* @var string NODE_FIELDS
* The node name that saves the table field definition
*/
const NODE_FIELDS = 'fields';
/**
* @var string NODE_KEY
* The node name that saves an element key
*/
const NODE_KEY = 'key';
/**
* @var string NODE_CODE
* The node name that saves the number code
*/
const NODE_CODE = 'code';
/**
* @var string NODE_FILE
* The node name that saves the file path
*/
const NODE_FILE = 'file';
/**
* @var string NODE_LINE
* The node name that saves the file line
*/
const NODE_LINE = 'line';
/**
* @var string NODE_STACK
* The node name that saves the exception stack trace
*/
const NODE_STACK = 'stack_trace';
/**
* @var string NODE_PARAMS
* The node name that saves variables parameters
*/
const NODE_PARAMS = 'parameters';
/**
* @var string NODE_COLS
* The node name that stores an array of column names
*/
const NODE_COLS = 'columns';
/**
* @var string NODE_VAL
* The node name that stores an array of column names paired with its values
*/
const NODE_VAL = 'values';
/**
* @var string NODE_CONDITION
* The node name that stores an SQL statement condition
*/
const NODE_CONDITION = 'condition';
/****************************************
************ URL PARAMS KEYS ************
*****************************************/
/**
* @var string KEY_SERVICE
* The parameter key that defines a service name
*/
const KEY_SERVICE = 'service';
/**
* @var string KEY_TASK
* The parameter key that defines a service task
*/
const KEY_TASK = 'task';
/**
* @var string KEY_PRETTY_PRINT
* The parameter key that defines a service task
*/
const KEY_PRETTY_PRINT = 'PP';
/**
* @var string PRETTY_PRINT_DARK
* The parameter key that specifies a dark theme with pretty print
*/
const PRETTY_PRINT_DARK = 'Dark';
/**
* @var string PRETTY_PRINT_LIGHT
* The parameter key that specifies a light theme with pretty print
*/
const PRETTY_PRINT_LIGHT = 'Light';
/***************************
****** Parsing Types ******
***************************/
/**
* @var string PARSE_AS_STRING
* The field name that stores the column name
*/
const PARSE_AS_STRING = 'String';
/**
* @var string PARSE_AS_INT
* Parse the value as an integer
*/
const PARSE_AS_INT = 'Integer';
/**
* @var string PARSE_AS_LONG
* Parse the value as long
*/
const PARSE_AS_LONG = 'Long';
/**
* @var string PARSE_AS_NUMBER
* Parse the value as number
*/
const PARSE_AS_NUMBER = 'Number';
/**
* @var string PARSE_AS_DATE
* Parse the value as date
*/
const PARSE_AS_DATE = 'Date';
/**
* @var string PARSE_AS_BOOLEAN
* Parse the value as boolean
*/
const PARSE_AS_BOOLEAN = 'Boolean';
/**
* @var string PARSING_TYPES
* The name of the parsing types row
*/
const PARSING_TYPES = 'ParsingTypes';
/************************************
****** Table definition nodes ******
************************************/
/**
* @var string TAB_DEF_INDEX
* The field name that stores the column index
*/
const TAB_DEF_INDEX = 'column_index';
/**
* @var string TAB_DEF_NAME
* The field name that stores the column name
*/
const TAB_DEF_NAME = 'column_name';
/**
* @var string TAB_DEF_TYPE
* The field name that stores the column data type
*/
const TAB_DEF_TYPE = 'data_type';
/**
* @var string TAB_DEF_CHAR_LENGTH
* The field name that stores the column max number of character length
*/
const TAB_DEF_CHAR_LENGTH = 'char_max_length';
/**
* @var string TAB_DEF_NUM_PRECISION
* The field name that stores the column number precision
*/
const TAB_DEF_NUM_PRECISION = 'numeric_precision';
/**
* @var string TAB_DEF_NUM_SCALE
* The field name that stores the column number scale
*/
const TAB_DEF_NUM_SCALE = 'numeric_scale';
/**
* @var string TAB_NAME
* The field name that stores the name of the table
*/
const TAB_NAME = 'table_name';
/**
* @var string TAB_COL_FILTER
* The column name used as filter in the selection data
*/
const TAB_COL_FILTER = 'column_filter';
/******************************************
************ FIELD NAMES *****************
*****************************************/
/**
* @var string ORACLE_FIELD_COL_ORDER
* The field name that stores the column name
*/
const ORACLE_FIELD_COL_ORDER = 'COLUMN_ID';
/**
* @var string ORACLE_FIELD_COL_NAME
* The field name that stores the column name
*/
const ORACLE_FIELD_COL_NAME = 'COLUMN_NAME';
/**
* @var string ORACLE_FIELD_DATA_TP
* The field name that stores data type
*/
const ORACLE_FIELD_DATA_TP = 'DATA_TYPE';
/**
* @var string ORACLE_FIELD_CHAR_LENGTH
* The field name that stores data length
*/
const ORACLE_FIELD_CHAR_LENGTH = 'CHAR_LENGTH';
/**
* @var string ORACLE_FIELD_NUM_PRECISION
* The field name that stores data length
*/
const ORACLE_FIELD_NUM_PRECISION = 'DATA_PRECISION';
/**
* @var string ORACLE_FIELD_NUM_SCALE
* The field name that stores data length
*/
const ORACLE_FIELD_NUM_SCALE = 'DATA_SCALE';
/**
* @var string PG_FIELD_COL_ORDER
* The field name that stores the column name
*/
const PG_FIELD_COL_ORDER = 'ordinal_position';
/**
* @var string PG_FIELD_COL_NAME
* The field name that stores the column name
*/
const PG_FIELD_COL_NAME = 'column_name';
/**
* @var string PG_FIELD_DATA_TP
* The field name that stores data type
*/
const PG_FIELD_DATA_TP = 'data_type';
/**
* @var string PG_FIELD_CHAR_LENGTH
* The field name that stores data length
*/
const PG_FIELD_CHAR_LENGTH = 'character_maximum_length';
/**
* @var string PG_FIELD_NUM_PRECISION
* The field name that stores data length
*/
const PG_FIELD_NUM_PRECISION = 'numeric_precision';
/**
* @var string PG_FIELD_NUM_SCALE
* The field name that stores data length
*/
const PG_FIELD_NUM_SCALE = 'numeric_scale';
/**
* @var string MYSQL_FIELD_COL_ORDER
* The field name that stores the column name
*/
const MYSQL_FIELD_COL_ORDER = 'ORDINAL_POSITION';
/**
* @var string MYSQL_FIELD_COL_NAME
* The field name that stores the column name
*/
const MYSQL_FIELD_COL_NAME = 'COLUMN_NAME';
/**
* @var string MYSQL_FIELD_DATA_TP
* The field name that stores data type
*/
const MYSQL_FIELD_DATA_TP = 'COLUMN_TYPE';
/**
* @var string MYSQL_FIELD_CHAR_LENGTH
* The field name that stores data length
*/
const MYSQL_FIELD_CHAR_LENGTH = 'CHARACTER_MAXIMUM_LENGTH';
/**
* @var string MYSQL_FIELD_NUM_PRECISION
* The field name that stores data length
*/
const MYSQL_FIELD_NUM_PRECISION = 'NUMERIC_PRECISION';
/**
* @var string MYSQL_FIELD_NUM_SCALE
* The field name that stores data length
*/
const MYSQL_FIELD_NUM_SCALE = 'NUMERIC_SCALE';
/******************************************
************ FUNCTION NAMES **************
*****************************************/
/**
* @var string F_POST
* The name of the POST action function
*/
const F_POST = 'POST_action';
/**
* @var string F_GET
* The name of the GET action function
*/
const F_GET = 'GET_action';
/************************************
************ Settings **************
************************************/
/**
* @var string GET_PARAM_MODE
* The type of parameters that are obtained from GET variables
*/
const GET_PARAM_MODE = 'GET_VARS';
/**
* @var string URL_PARAM_MODE
* The type of parameters that are obtained from URL parameters
*/
const URL_PARAM_MODE = 'URL_PARAMETERS';
/**
* @var string MIX_PARAM_MODE
* Use get variables and url parameters.
*/
const GET_AND_URL_PARAM = "MIXED";
?><file_sep>/testing/KanojoXTester.php
<?php
include_once "utils/KanojoXTestUtils.php";
/**
* This file tests the functionality of the class KanojoX
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
$response = new UrabeResponse();
$result = (object)array();
//0: Reads the body
$body = get_body_as_json();
if (isset($body)) {
//1: Selects the driver connector
$kanojo = pick_connector($body->driver, $body);
//2: Initialize the connection data
$kanojo->init($body->connection);
//3: Connect to the Database
$conn = $kanojo->connect();
} else
$kanojo = null;
//4: Pick a test
$test = TEST_VAR_NAME . "_" . $_GET[TEST_VAR_NAME];
//5: Test
$result->{$_GET[TEST_VAR_NAME]} = $test($kanojo, $body);
//6: Close the connection
$conn = isset($kanojo) ? $kanojo->close() : null;
//7: Print result
echo json_encode($result);
?><file_sep>/src/NumberType.php
<?php
require_once "Enum.php";
/**
* Defines the type of number to be saved
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
abstract class NumberType extends Enum
{
/**
* @var string NAN
* Not a number
*/
const NAN = -1;
/**
* @var string INTEGER
* Defines a numeric integer
*/
const INTEGER = 0;
/**
* @var string LONG
* Defines a numeric long
*/
const LONG = 1;
/**
* @var string DOUBLE
* Defines a numeric double
*/
const DOUBLE = 2;
}
?><file_sep>/testing/UrabeTester.php
<?php
include_once "utils/UrabeTestUtils.php";
/**
* This file tests the functionality of the class Urabe
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
$result = (object)array();
//0: Reads the body
$body = get_body_as_json();
if (isset($body)) {
//1: Creates a Kanojo Object and initialize it
$kanojo = pick_connector($body->driver, $body);
$kanojo->init($body->connection);
//2: Open Urabe connector
$urabe = new Urabe($kanojo);
} else
$urabe = null;
//4: Pick a test
$test = TEST_VAR_NAME . "_" . $_GET[TEST_VAR_NAME];
//5: Test
$result->{$_GET[TEST_VAR_NAME]} = $test($urabe, $body);
//Connection is closed automatically calling the kanojo destructor
echo json_encode($result);
?><file_sep>/src/DateFieldDefinition.php
<?php
include_once "FieldDefinition.php";
/**
* Date Field Definition Class
*
* This class encapsulates a table column definition and formats its values to a JSON field value
* Each table field is associated to a column and stores its index and data type.
*
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class DateFieldDefinition extends FieldDefinition
{
/**
* @var string The format used to parse the given date
*/
public $date_format;
/**
* Initialize a new instance of a Field Definition class
*
* @param string $index The column index
* @param string $column The column name
* @param string $data_type The data type name
* @param string $date_format The date format
*/
public function __construct($index, $column, $data_type, $date_format)
{
$this->date_format = $date_format;
parent::__construct($index, $column, $data_type);
}
/**
* Gets the value from a string in the row definition data type
*
* @param string $value The selected value as string
* @return string The value formatted as a date
*/
public function get_value($value)
{
if (is_null($value))
return null;
else
return date($this->date_format, strtotime($value));
}
}
?><file_sep>/src/StringFieldDefinition.php
<?php
include_once "FieldDefinition.php";
/**
* String Field Definition Class
*
* This class encapsulates a table column definition and formats its values to a JSON field value
* Each table field is associated to a column and stores its index and data type.
*
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class StringFieldDefinition extends FieldDefinition
{
/**
* @var int The maximum number of allowed characters
*/
public $char_max_length;
/**
* Initialize a new instance of a Field Definition class
*
* @param string $index The column index
* @param string $column The column name
* @param string $data_type The data type name
* @param int $char_max_length The maximum number of allowed characters,
* value zero allows unlimited characters
*/
public function __construct($index, $column, $data_type, $char_max_length)
{
$this->char_max_length = $char_max_length;
parent::__construct($index, $column, $data_type);
}
/**
* Gets the value from a string in the row definition data type
*
* @param string $value The selected value as string
* @return mixed The value as the same type of the table definition.
*/
public function get_value($value)
{
if (is_null($value))
return null;
else {
return strval($value);
}
}
}
?><file_sep>/src/NumericFieldDefinition.php
<?php
include_once "FieldDefinition.php";
include_once "NumberType.php";
/**
* Numeric Field Definition Class
*
* This class encapsulates a table column definition and formats its values to a JSON field value
* Each table field is associated to a column and stores its index and data type.
*
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class NumericFieldDefinition extends FieldDefinition
{
/**
* @var int The numeric precision
*/
public $numeric_precision;
/**
* @var int The numeric scale
*/
public $numeric_scale;
/**
* @var NumberType The numeric type
*/
public $numeric_type;
/**
* Initialize a new instance of a Field Definition class
*
* @param string $index The column index
* @param string $column The column name
* @param string $data_type The data type name
* @param int $precision The numeric precision
* @param int $scale The numeric scale
* value zero allows unlimited characters
*/
public function __construct($index, $column, $data_type, $precision, $scale)
{
$this->numeric_scale = $scale;
$this->numeric_precision = $precision;
if ($data_type == PARSE_AS_INT)
$this->numeric_type = NumberType::INTEGER;
else if ($data_type == PARSE_AS_NUMBER)
$this->numeric_type = NumberType::DOUBLE;
else if ($data_type == PARSE_AS_LONG)
$this->numeric_type = NumberType::LONG;
else
$this->numeric_type = NumberType::NAN;
parent::__construct($index, $column, $data_type);
}
/**
* Gets the value from a string in the row definition data type
*
* @param string $value The selected value as string
* @return mixed The value as the same type of the table definition.
*/
public function get_value($value)
{
if (is_null($value))
return null;
else if ($this->numeric_type == NumberType::INTEGER) {
return intval($value);
} else if ($this->numeric_type == NumberType::DOUBLE) {
return doubleval($value);
} else if ($this->numeric_type == NumberType::LONG) {
return strval($value);
} else
return null;
}
}
?><file_sep>/src/UrabeSettings.php
<?php
include_once "ServiceStatus.php";
include_once "DBDriver.php";
include_once "JsonPrettyPrint.php";
/**
* Urabe application settings
*
* In this file the application work around can be customized
*
* @version 1.0.0
* @api Makoto Urabe DB Manager database connector
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
return (object)array(
/**
* @var string Defines the type of parameters to use by the web services.
* Available modes URL_PARAM_MODE or GET_PARAM_MODE or MIX_PARAM_MODE
*/
"parameter_mode" => URL_PARAM_MODE,
/**
* @var bool If set to true Urabe handles errors as defined in the KanojoX Class
*/
"handle_errors" => true,
/**
* @var bool If set to true Urabe handles exceptions as defined in the KanojoX Class
*/
"handle_exceptions" => true,
/**
* @var bool If set to true and Urabe handles exceptions, the error details such as file, line, error code and context are shown in the response
*/
"show_error_details" => false,
/**
* @var bool If set to true and Urabe handles exceptions, the error context is shown in the response
*/
"show_error_context" => false,
/**
* @var bool If set to true and Urabe handles exceptions, the stack trace will be added to the response
*/
"enable_stack_trace" => false,
/**
* @var bool If set to true adds the SQL statement to the Urabe response. This should be enabled just for testing purposes,
* not recommended for staging or production.
*/
"add_query_to_response" => true,
/**
* @var bool If set to true hides the error code. This should be enabled just for testing purposes,
* not recommended for staging or production.
*/
"hide_exception_error" => false,
/**
* @var ServiceStatus The default status for GET Service
*/
"default_GET_status" => ServiceStatus::AVAILABLE,
/**
* @var ServiceStatus The default status for POST Service
*/
"default_POST_status" => ServiceStatus::BLOCKED,
/**
* @var ServiceStatus The default status for PUT Service
*/
"default_PUT_status" => ServiceStatus::BLOCKED,
/**
* @var ServiceStatus The default status for DELETE Service
*/
"default_DELETE_status" => ServiceStatus::BLOCKED,
/**
* @var JsonPrettyStyle The JSON PP Dark Style
*/
"dark_pp_style" => JsonPrettyStyle::DarkStyle(),
/**
* @var JsonPrettyStyle The JSON PP Light Style
*/
"light_pp_style" => JsonPrettyStyle::LightStyle(),
/**
* @var JsonPrettyStyle The default JSON PP Style
*/
"default_pp_style" => JsonPrettyStyle::DarkStyle(),
/**
* @var boolean True if the background is dark, otherwise it will be white
*/
"default_pp_bg" => true,
/**
* @var string The date format used to present dates, to modify
* the date format visit the url: https://secure.php.net/manual/en/function.date.php
*/
"date_format" => "m-d-y",
/**
* @var string The path to the folder where the table definitions are stored
*/
"table_definitions_path" => dirname(__DIR__) . DIRECTORY_SEPARATOR . 'tmp' . DIRECTORY_SEPARATOR . 'table_definitions' . DIRECTORY_SEPARATOR,
/**
* @var object Defines the parsing types sort by category
*/
"field_type_category" => (object)array(
"ParsingTypes" => array(
"String", "Integer", "Long", "Number", "Date", "Boolean"
),
"String" => array(
//PG Types
"character", "text",
//MySQL Types
"varchar"
),
"Integer" => array(
//PG Types
"integer", "smallint",
//MySQL Types
"int"
),
"Long" => array(
//PG Types
"bigint"
//MySQL types
),
"Number" => array(
//PG Types
"double precision", "numeric", "real",
//MySQL types
"double"
),
"Date" => array(
//PG Types
"date",
"timestamp"
),
"Boolean" => array(
"boolean"
)
)
);
?><file_sep>/testing/utils/HasamiWrapperTestUtils.php
<?php
include_once "../src/HasamiWrapper.php";
include_once "TestUtils.php";
?><file_sep>/src/PUTService.php
<?php
include_once "HasamiRESTfulService.php";
/**
* PUT Service Class
* This class defines a RESTful service with the request verb PUT.
* This method is often used to insert data into the database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class PUTService extends HasamiRESTfulService
{
/**
* __construct
*
* Initialize a new instance of the PUT Service class.
* A default service task is defined as a callback using the function PUTService::default_PUT_action
*
* @param IHasami $wrapper The web service wrapper
*/
public function __construct($wrapper)
{
$data = $wrapper->get_request_data();
$data->extra->{TAB_NAME} = $wrapper->get_table_name();
$data->extra->{CAP_INSERT} = $wrapper->get_insert_columns();
$urabe = $wrapper->get_urabe();
parent::__construct($data, $urabe);
$this->wrapper = $wrapper;
$this->service_task = function ($data, $urabe) {
return $this->default_PUT_action($data, $urabe);
};
}
/**
* Wraps the insert function from urabe
* @param string $table_name The table name.
     * @param object $values The values to insert as a key-value paired object;
     * column names are the keys and the values to insert are the associated values. Placeholders may only be used for values, not for identifiers.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function insert($table_name, $values)
{
return $this->urabe->insert($table_name, $values);
}
/**
* Wraps the insert_bulk function from urabe
*
     * @param string $table_name The table name.
     * @param array $columns The column names to insert.
     * @param array $values The values to insert as an array of key-value paired entries;
     * column names are the keys and the values to insert are the associated values. Placeholders may only be used for values, not for identifiers.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function insert_bulk($table_name, $columns, $values)
{
return $this->urabe->insert_bulk($table_name, $columns, $values);
}
/**
     * Defines the default PUT action; by default it executes an insertion query with the data passed
     * in the body property insert_values
     * @param WebServiceContent $data The web service content
     * @param Urabe $urabe The database manager
     * @throws Exception An Exception is thrown if the request cannot be processed correctly
* @return UrabeResponse The server response
*/
protected function default_PUT_action($data, $urabe)
{
return $this->default_action($data, $urabe);
}
/**
     * Defines the default PUT action; by default it executes an insertion query with the data passed
     * in the body property insert_values
     * @param WebServiceContent $data The web service content
     * @param Urabe $urabe The database manager
     * @throws Exception An Exception is thrown if the request cannot be processed correctly
* @return UrabeResponse The server response
*/
public function default_action($data, $urabe)
{
try {
$table_name = $data->extra->{TAB_NAME};
$insert = $data->extra->{CAP_INSERT};
//Validate column data
$this->validate_columns('insert_values', $insert);
//Validate values
if (!property_exists($this->data->body->insert_values, NODE_VAL))
throw new Exception(sprintf(ERR_INCOMPLETE_DATA, 'insert_values', NODE_VAL));
//Formats values with table definition
$values = $this->wrapper->format_values($this->data->body->insert_values->values);
$columns = $this->data->body->insert_values->columns;
//Build insert query
if (is_array($values))
$response = $this->urabe->insert_bulk($table_name, $columns, $values);
else
$response = $this->urabe->insert($table_name, $values);
return $response;
} catch (Exception $e) {
throw new Exception("Error Processing Request, " . $e->getMessage(), $e->getCode());
}
}
}
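/*
 * Illustrative note (editor's sketch): based on default_action above, the PUT request body
 * is expected to contain an "insert_values" node with "columns" and "values", for example:
 *
 *   {
 *       "insert_values": {
 *           "columns": ["u_name", "u_pass"],
 *           "values": { "u_name": "newUser", "u_pass": "secret" }
 *       }
 *   }
 *
 * The column and value names above are hypothetical; when "values" is an array of
 * objects, insert_bulk is used instead of insert.
 */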
<file_sep>/src/JsonPrettyStyle.php
<?php
/**
* Json Pretty Print Style
*
* This class represent the color style used by the JSON Pretty Print
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class JsonPrettyStyle
{
/**
* @var string Defines the color used for symbols like "{" or "," in the JSON string.
*/
public $symbol_color;
/**
* @var string Defines the color used for properties names in the JSON string.
*/
public $property_name_color;
/**
     * @var string Defines the color used for text values in the JSON string.
*/
public $text_value_color;
/**
     * @var string Defines the color used for number values in the JSON string.
*/
public $number_value_color;
/**
* @var string Defines the color used for null value
*/
public $null_value_color;
/**
* @var string Defines the color used for boolean values in the JSON string.
*/
public $boolean_value_color;
/**
* Defines the style used in white backgrounds
*
* @return JsonPrettyStyle The JSON style
*/
public static function LightStyle()
{
$style = new JsonPrettyStyle();
$style->symbol_color = "#808388";
$style->boolean_value_color = "#00a2e8";
$style->property_name_color = "#000";
$style->text_value_color = "#008000";
$style->number_value_color = "#e23400";
        $style->null_value_color = "#730202";
return $style;
}
/**
* Defines the style used in black backgrounds
* This is the default PP Style
*
* @return JsonPrettyStyle The JSON style
*/
public static function DarkStyle()
{
$style = new JsonPrettyStyle();
$style->symbol_color = "#ffffff";
$style->boolean_value_color = "#66d5ef";
$style->property_name_color = "#a6e22e";
$style->text_value_color = "#FAB02F";
$style->number_value_color = "#9481dc";
        $style->null_value_color = "#e92647";
return $style;
}
}
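/*
 * Illustrative note (editor's sketch): a custom style can be built by instantiating the
 * class and assigning the color properties directly; the colors below are arbitrary example values.
 *
 *   $style = new JsonPrettyStyle();
 *   $style->symbol_color = "#cccccc";
 *   $style->property_name_color = "#ff8800";
 *   $style->text_value_color = "#00aa00";
 *   $style->number_value_color = "#0066ff";
 *   $style->boolean_value_color = "#aa00aa";
 *   $style->null_value_color = "#aa0000";
 */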
?><file_sep>/testing/utils/KanojoXTestUtils.php
<?php
include_once "../src/ORACLEKanojoX.php";
include_once "../src/PGKanojoX.php";
include_once "../src/MYSQLKanojoX.php";
include_once "TestUtils.php";
/**
* This file defines the tests available for testing the
* KanojoX class
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
/**
* This function is an example for testing a SQL selection query and
* fetching the result associatively
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_fetch_assoc_no_params($kanojo, $body)
{
$sql = $body->sql_no_params;
$result = new UrabeResponse();
$row = $kanojo->fetch_assoc($sql);
$result = $result->get_response("KanojoX fetch assoc test with no params", $row, $sql);
return $result;
}
/**
* This function is an example for testing a SQL selection query that creates a prepared statement with the given parameters then
* fetches the result associatively
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_fetch_assoc_with_params($kanojo, $body)
{
$result = new UrabeResponse();
$sql = $body->sql_params;
$params = $body->params;
$row = $kanojo->fetch_assoc($sql, $params);
$result = $result->get_response("KanojoX fetch assoc test with params", $row, $sql);
return $result;
}
/**
* This function is an example for testing a SQL selection query that selects a table definition
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_get_table_definition($kanojo, $body)
{
$result = new UrabeResponse();
$sql = $kanojo->get_table_definition_query($body->table_name);
$row = $kanojo->fetch_assoc($sql);
$result = $result->get_response("KanojoX get table definition test", $row, $sql);
return $result;
}
/**
* This function is an example for testing a SQL execution query.
* This method is used if you are using INSERT, DELETE, or UPDATE SQL statements.
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_execute_with_no_params($kanojo, $body)
{
$result = new UrabeResponse();
$sql = $body->update_sql_no_params;
$result = $kanojo->execute($sql);
return $result;
}
/**
* This function is an example for testing a SQL execution query That creates a prepared statement with the given parameters.
* This method is used if you are using INSERT, DELETE, or UPDATE SQL statements.
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_execute_with_params($kanojo, $body)
{
$result = new UrabeResponse();
$sql = $body->update_sql_params;
$params = $body->params;
$result = $kanojo->execute($sql, $params);
return $result;
}
/**
* This function is an example for testing an error triggering and managing errors using KanojoX
*
* @param KanojoX $kanojo The database connector
* @param object $body The request body decoded as an object from JSON data
* @return UrabeResponse The selection result as a web service response
*/
function test_send_error($kanojo, $body)
{
trigger_error('Trigger Error', E_USER_WARNING);
return $kanojo->get_last_error();
}
/**
* This function list all available tests
*
*/
function test_get_available_tests()
{
$functions = get_defined_functions();
$functions = $functions["user"];
$test_func = array();
for ($i = 0; $i < sizeof($functions); $i++) {
if (substr($functions[$i], 0, 5) == TEST_VAR_NAME . "_")
array_push($test_func, str_replace(array("test_"), array(), $functions[$i]));
}
$response = array("msg" => "Available functions", "tests" => $test_func, "size" => sizeof($test_func));
return $response;
}
?><file_sep>/testing/HasamiWrapperTester.php
<?php
ini_set('display_errors', 1);
ini_set('display_startup_errors', 1);
error_reporting(E_ALL);
include_once "utils/HasamiWrapperTestUtils.php";
/**
* HasamiWrapperTester Class
*
* This class is used to test the functionality of a web service built with HasamiWrapper
* @version 1.0.0
* @api Makoto Urabe DB Manager database connector
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class HasamiWrapperTester extends HasamiWrapper
{
const TABLE_NAME = "users";
/**
* Initialize a new instance of the test service
*/
public function __construct()
{
$connector = get_KanojoX_from_file("../tmp/conn_file.json");
parent::__construct($connector->schema . "." . self::TABLE_NAME, $connector, "id");
//This changes default status for the given services
$this->set_service_status("PUT", ServiceStatus::AVAILABLE);
$this->set_service_status("DELETE", ServiceStatus::AVAILABLE);
//This mode will simulate the user is logged executing the function
$this->set_service_status("POST", ServiceStatus::LOGGED);
//This only applies if GET verbose detected
if ($this->request_data->method == "GET" && $this->request_data->GET_variable_equals("selection_mode", "advance"))
$this->set_service_task("GET", "advance_select");
//This only applies if POST verbose is detected
if ($this->request_data->method == "POST" && $this->request_data->GET_variable_equals("update_mode", "advance"))
$this->set_service_task("POST", "advance_update");
        //This only applies if DELETE verbose is detected
if ($this->request_data->method == "DELETE" && $this->request_data->GET_variable_equals("delete_mode", "advance"))
$this->set_service_task("DELETE", "advance_delete");
}
/**
     * This function simulates access validation
     * by selecting a user id via a username and password.
     * The username and password are sent in the body.
*
* @return boolean True if the validation access succeed
*/
protected function validate_access()
{
if ($this->request_data->validate_obligatory_body_properties("username", "password")) {
$user_name = $this->request_data->body->username;
$password = $this->request_data->body->password;
$response = $this->select_user($this->urabe, array($user_name, $password));
//Should select at least one row.
//This simulates a validation access
return $response->size > 0;
}
}
/**
* Gets the table INSERT column names
* By default the insertion columns are all the columns from the table definition
     * except the primary key column
*
* @return array Returns the column names in an array of strings
*/
public function get_insert_columns()
{
$column_names = parent::get_insert_columns();
//Ignore last login column
unset($column_names["last_login"]);
return $column_names;
}
/**
     * This function tests the advanced selection; it overrides the default selection
     * and is defined using the wrapper set_service_task, passing as parameters the request method "GET"
     * and the function name. Also, for this example, this function expects that the GET variables contain
     * "username" and "password"
*
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @return UrabeResponse The selection response
*/
public function advance_select($data, $urabe)
{
if ($data->validate_obligatory_GET_variables("username", "password")) {
//Use universal format @paramIndex for place holders
$parameters = $data->pick_GET_variable("username", "password");
return $this->select_user($urabe, $parameters);
}
}
/**
     * This function tests the advanced update; it overrides the default update action
     * using the wrapper set_service_task, passing as parameters the request method "POST"
     * and the function name. Also, for this example, this function expects that some parameters are defined in the
     * condition body
*
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @return UrabeResponse The selection response
*/
public function advance_update($data, $urabe)
{
//Validate body
$data->validate_obligatory_body_properties(NODE_VAL, "adv_condition");
//Extract values
$percent = $this->format_value($urabe->get_driver(), "percent", $data->body->adv_condition->percent);
$is_active = $this->format_value($urabe->get_driver(), "is_active", $data->body->adv_condition->is_active);
//Build condition
$condition = "percent > " . $percent . " AND " . "is_active = '" . $is_active . "'";
$values = $this->format_values($data->body->{NODE_VAL});
//Update
return $urabe->update($this->table_name, $values, $condition);
}
/**
     * This function tests the advanced delete; it overrides the default delete action
     * using the wrapper set_service_task, passing as parameters the request method "DELETE"
     * and the function name. Also, for this example, this function expects that some parameters are defined in the
* condition body
*
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @return UrabeResponse The selection response
*/
public function advance_delete($data, $urabe)
{
//Validate body
$data->validate_obligatory_body_properties("adv_condition");
//Extract values
$percent = $this->format_value($urabe->get_driver(), "percent", $data->body->adv_condition->percent);
$is_active = $this->format_value($urabe->get_driver(), "is_active", $data->body->adv_condition->is_active);
//Build condition
$condition = "percent > " . $percent . " AND " . "is_active = '" . $is_active . "'";
//Update
return $urabe->delete($this->table_name, $condition);
}
/**
     * Tests a function that is only allowed to execute in POST or PUT.
     * By default callback functions receive the web service content and the database connector
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @return UrabeResponse The urabe response
*/
public function u_action_test_restrict_call_access($data, $urabe)
{
$data->restrict_by_content("POST", "PUT");
$response = new UrabeResponse();
return $response->get_response("You are allowed", array());
}
/**
     * Tests the service's current status; this function is exposed as the u_action_status web service action
     *
     * @return object The current service status
*/
public function u_action_status()
{
return $this->get_status();
}
/**
     * Selects a user from the database
*
* @param Urabe $urabe The database manager
* @param array $parameters The parameters needed to select the user,
* Should be user_name and password
* @return object The message response
*/
private function select_user($urabe, $parameters)
{
$table_name = $this->table_name;
$condition = "u_name = @1 AND u_pass = @2";
$sql = $urabe->format_sql_place_holders("SELECT * FROM $table_name WHERE $condition");
$result = $urabe->select($sql, $parameters);
return $result;
}
}
$service = new HasamiWrapperTester();
$result = $service->get_response();
echo (is_string($result) ? $result : json_encode($result, JSON_PRETTY_PRINT));
?><file_sep>/src/MysteriousParser.php
<?php
include_once "StringFieldDefinition.php";
include_once "NumericFieldDefinition.php";
include_once "DateFieldDefinition.php";
include_once "BooleanFieldDefinition.php";
/**
* Mysterious parser class
*
* This class parses a row from a table definition
* @version 1.0.0
* @api Makoto Urabe DB Manager Oracle
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class MysteriousParser
{
    /**
     * @var string A unique identifier for this parser instance, computed from the parse method and the object hash
     */
    public $id;
/**
* @var array The table fields definition as an array of FieldDefinition.
*/
public $table_definition;
/**
     * @var array Defines how the columns are mapped to the message response; if null
     * the columns keep the database column names. The values are passed as key-value pairs, where the
     * key is the database column name and the value is the message field name.
     * These values are case sensitive
*/
public $column_map;
/**
     * Defines the result parsing method; this function receives a row and
     * an array where the parsed data should be stored.
*
* @var callback The parse method, passed as an anonymous function
*/
public $parse_method;
/**
     * Specifies the class used to call the parsing methods
*
* @var object The main class used to call the parsing method
*/
private $caller;
/**
* __construct
*
* Initialize a new instance of the Mysterious parser.
* @param FieldDefinition[] $table_definition The table fields definition.
     * When a table definition is provided the fetched data is parsed using the parse_with_field_definition function
     * @param object|null $caller The object used to call a custom parsing method
     * @param string $parse_method The name of the custom parsing method defined by the caller
     */
public function __construct($table_definition = null, $caller = null, $parse_method = "")
{
$this->caller = isset($caller) ? $caller : $this;
$this->table_definition = $table_definition;
if (isset($table_definition) && !isset($caller))
$this->parse_method = "parse_with_field_definition";
else if (isset($caller)) {
$this->parse_method = $parse_method;
} else {
$this->parse_method = "simple_parse";
}
$this->id = hash("md5", $this->parse_method . spl_object_hash($this));
}
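    /*
     * Illustrative note (editor's sketch): a parser created without a table definition falls back
     * to "simple_parse" and returns rows as-is; when a table definition is used, column_map can
     * rename the resulting keys. The column names below are hypothetical:
     *
     *   $parser = new MysteriousParser($table_definition);
     *   $parser->column_map = array("u_name" => "userName");
     */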
/**
* Gets the sender description
*
* @param mixed $context Extra data used by this sender
* @return array The sender data as a key value paired array with the keys {caller, method, id, context}
*/
private function get_sender($context = null)
{
return array("caller" => get_class($this->caller), "method" => $this->parse_method, "id" => $this->id, "context" => is_null($context) ? "" : $context);
}
/**
* Check if a field name is defined on the table definition
*
* @param string $field_name The field name
* @return boolean True if the field name is defined otherwise false
*/
public function is_defined($field_name)
{
return array_key_exists($field_name, $this->table_definition);
}
/**
* Parse the fetch assoc result by the parse_method callback definition
*
     * @param array $result The collection of rows where the parsed rows are stored
* @param array $row The selected row picked from the fetch assoc process.
* @return void
*/
public function parse(&$result, $row)
{
if (is_string($this->parse_method))
$this->caller->{$this->parse_method}($this, $result, $row);
else
call_user_func_array($this->parse_method, array($this, &$result, $row));
}
/**
* Gets the field definition used to parse a row
*
     * @param array $newRow The table definition row selected from the database
* @return FieldDefinition The field definition
*/
public function get_parsing_data($newRow)
{
$tp = $newRow[TAB_DEF_TYPE];
$dataTypes = KanojoX::$settings->field_type_category;
$max_length = is_null($newRow[TAB_DEF_CHAR_LENGTH]) ? 0 : intval($newRow[TAB_DEF_CHAR_LENGTH]);
$scale = is_null($newRow[TAB_DEF_NUM_SCALE]) ? 0 : intval($newRow[TAB_DEF_NUM_SCALE]);
$precision = is_null($newRow[TAB_DEF_NUM_PRECISION]) ? 0 : intval($newRow[TAB_DEF_NUM_PRECISION]);
if ($tp == PARSE_AS_STRING || $this->is_of_type($tp, $dataTypes->String))
$field_definition = new StringFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_STRING, $max_length);
else if ($tp == PARSE_AS_INT || $this->is_of_type($tp, $dataTypes->Integer))
$field_definition = new NumericFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_INT, $precision, $scale);
else if ($tp == PARSE_AS_NUMBER || $this->is_of_type($tp, $dataTypes->Number))
$field_definition = new NumericFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_NUMBER, $precision, $scale);
else if ($tp == PARSE_AS_DATE || $this->is_of_type($tp, $dataTypes->Date))
$field_definition = new DateFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_DATE, KanojoX::$settings->date_format);
else if ($tp == PARSE_AS_LONG || $this->is_of_type($tp, $dataTypes->Long))
$field_definition = new NumericFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_LONG, $precision, $scale);
else if ($tp == PARSE_AS_BOOLEAN || $this->is_of_type($tp, $dataTypes->Boolean))
$field_definition = new BooleanFieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], PARSE_AS_BOOLEAN);
else
$field_definition = new FieldDefinition($newRow[TAB_DEF_INDEX], $newRow[TAB_DEF_NAME], $tp);
$field_definition->db_type = $newRow[TAB_DEF_TYPE];
return $field_definition;
}
/**
* Check if a given type belongs to a given type category
*
* @param string $dataType The data type to validate
     * @param array $dataTypes The collection of data types
     * @return boolean True if the data type matches any of the given types
*/
public function is_of_type($dataType, $dataTypes)
{
$tp = strtolower($dataType);
foreach ($dataTypes as &$data_type)
if (strpos($tp, $data_type) !== false)
return true;
return false;
}
/**
     * Parses a table definition row and stores the result as a FieldDefinition
     * indexed by the column name
*
     * @param MysteriousParser $mys_parser The mysterious parser that is extracting the data
* @param array $result The collection of rows where the parsed rows are stored
* @param array $row The selected row picked from the fetch assoc process
* @return void
*/
public function parse_table_field_definition($mys_parser, &$result, $row)
{
$newRow = array();
$column_names = array_map(function ($item) {
return $item->column_name;
}, $mys_parser->table_definition);
foreach ($row as $column_name => $column_value) {
if (in_array($column_name, $column_names)) {
$key = $mys_parser->get_column_name($column_name);
$value = $mys_parser->table_definition[$column_name]->get_value($column_value);
$newRow[$key] = $value;
}
}
$result[$newRow[TAB_DEF_NAME]] = $this->get_parsing_data($newRow);
}
/**
* Execute the default parse storing the value to the array with an associated key.
* The associated key is the column name
*
     * @param MysteriousParser $mys_parser The mysterious parser that is extracting the data
* @param array $result The collection of rows where the parsed rows are stored
* @param array $row The selected row picked from the fetch assoc process
* @return void
*/
public function simple_parse($mys_parser, &$result, $row)
{
array_push($result, $row);
}
/**
* Parse the data using the field definition, if a column map is set the result keys are mapped
* to the given value
*
     * @param MysteriousParser $mys_parser The mysterious parser that is extracting the data
* @param array $result The collection of rows where the parsed rows are stored
* @param array $row The selected row picked from the fetch assoc process
* @return void
*/
protected function parse_with_field_definition($mys_parser, &$result, $row)
{
$newRow = array();
$column_names = array_map(function ($item) {
return $item->column_name;
}, $mys_parser->table_definition);
foreach ($row as $column_name => $column_value) {
if (in_array($column_name, $column_names)) {
$key = $mys_parser->get_column_name($column_name);
$value = $mys_parser->table_definition[$column_name]->get_value($column_value);
$newRow[$key] = $value;
}
}
array_push($result, $newRow);
}
/**
     * Gets the column name from the column_map array if it is defined, otherwise
     * the column name stays as selected
*
* @param string $column_name The column name
* @return string The column name, same or mapped name
*/
protected function get_column_name($column_name)
{
if (isset($this->column_map) && array_key_exists($column_name, $this->column_map))
return $this->column_map[$column_name];
else
return $column_name;
}
}<file_sep>/src/HasamiWrapper.php
<?php
include_once "GETService.php";
include_once "PUTService.php";
include_once "DELETEService.php";
include_once "POSTService.php";
include_once "IHasami.php";
/**
 * A HasamiWrapper is a web service wrapper class
 * This class encapsulates and manages the web service verbs PUT, POST, DELETE and GET
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class HasamiWrapper implements IHasami
{
/************************
*** Protected fields ***
************************/
/**
* The web service request content
*
* @var WebServiceContent The web service content
*/
protected $request_data;
/**
* @var Urabe The database manager
*/
protected $urabe;
/**
* @var array The table fields definitions
* Can be loaded from a query or from a JSON string
*/
protected $table_fields;
/**
* @var string The table name
*/
protected $table_name;
/**
* @var string The Selection filter used by GET service
*/
protected $selection_filter;
/**
* @var string Sets or gets the table primary key column name
* This field is used when constructing a condition for UPDATE or DELETE
*/
protected $primary_key;
/**
* @var array The Restful services managed by hasami wrapper in an array of HasamiRESTfulService
* Each value is index by the verbose name
*/
protected $services;
/**
* @var array The Restful services available status, the service only execute when the
* status is active or by succeeding in authorization mode
* Each value is index by the verbose name
*/
protected $services_status;
/************************************
*** Public access to properties ***
*** via getters and setters ***
************************************/
/**
* Gets the database manager
*
* @return Urabe The database manager
*/
public function get_urabe()
{
return $this->urabe;
}
/**
* Gets the table definition as an array of FieldDefinition
* @return array The table fields as an array of FieldDefinition
*/
public function get_table_definition()
{
        $this->table_fields = $this->urabe->get_table_definition($this->table_name);
        return $this->table_fields;
    }
/**
* Gets the web service request content
*
* @return WebServiceContent Returns the web service content
*/
public function get_request_data()
{
return $this->request_data;
}
/**
* Gets the table name
*
* @return string Returns the table name
*/
public function get_table_name()
{
return $this->table_name;
}
/**
* Gets the column name used as default filter
*
* @return string Returns the column name
*/
public function get_selection_filter()
{
return $this->selection_filter;
}
/**
* Sets the selection filter, used by the GET service
* in its default mode
* @param string $condition The filter condition
     * @return void
*/
public function set_selection_filter($condition)
{
$this->selection_filter = $condition;
}
/**
* Gets the column name used as primary key
*
* @return string Returns the column name
*/
public function get_primary_key_column_name()
{
return $this->primary_key;
}
/**
* Gets the table INSERT column names
* By default the insertion columns are all the columns from the table definition
     * except the primary key column
*
* @return array Returns the column names in an array of strings
*/
public function get_insert_columns()
{
$column_names = array_map(function ($item) {
return $item->column_name;
}, $this->table_fields);
unset($column_names[$this->primary_key]);
return $column_names;
}
/**
* Formats a value using a field definition
*
* @param DBDriver $driver The database connection driver
* @param string $column_name The column name
* @param mixed $value The value to format
* @return mixed The value formatted
*/
public function format_value($driver, $column_name, $value)
{
$field_definition = $this->table_fields[$column_name];
return $field_definition->format_value($driver, $value);
}
/**
* Formats a group of values into the current table definition format
*
* @param mixed $values Can be an group of values or an array of group of values
* @return mixed The formatted value
*/
public function format_values($values)
{
$driver = $this->urabe->get_driver();
$format_values_func = function ($driver, $data) {
$columns = array_keys(get_object_vars($data));
for ($i = 0; $i < count($columns); $i++)
$data->{$columns[$i]} = $this->format_value($driver, $columns[$i], $data->{$columns[$i]});
return $data;
};
//Format the entry values
if (is_array($values)) {
for ($i = 0; $i < count($values); $i++)
$values[$i] = $format_values_func($driver, $values[$i]);
return $values;
} else
return $format_values_func($driver, $values);
}
/**
* Gets the service manager by the verbose type
* @param string $verbose The service verbose type
* @return HasamiRESTfulService The service manager
*/
public function get_service($verbose)
{
return $this->services[$verbose];
}
/**
* Gets the service status assigned to the given service
* @param string $verbose The service verbose type
* @return ServiceStatus The service current status
*/
public function get_service_status($verbose)
{
return $this->services_status[$verbose];
}
/**
* Sets the service status to the given service name
* @param string $verbose The service verbose type
* @param ServiceStatus $status The service status
* @return void
*/
public function set_service_status($verbose, $status)
{
$this->services_status[$verbose] = $status;
}
/*******************
*** Constructor ***
*******************/
/**
* __construct
*
* Initialize a new instance of a HasamiWrapper Class
* @param string $full_table_name The full table name, used to wrap SELECT, UPDATE, INSERT AND DELETE actions
* @param KanojoX $connector The database connector
* @param string|NULL $primary_key The name of the primary key.
* @param FieldDefinition[] $table_definition The table definition, if null
     * the table definition is obtained via a selection query.
*/
public function __construct($full_table_name, $connector, $primary_key = null, $table_definition = null)
{
$this->table_name = $full_table_name;
$this->urabe = new Urabe($connector);
$this->primary_key = $primary_key;
//Selecting table definition and table definition parser
if (is_null($table_definition) && table_definition_exists($this->table_name)) {
$this->table_fields = load_table_definition($this->table_name);
} else if (is_null($table_definition)) {
$this->table_fields = $this->urabe->get_table_definition($this->table_name);
save_table_definition($full_table_name, $connector->db_driver, $this->table_fields);
} else
$this->table_fields = $table_definition;
//Get the request content
$this->request_data = new WebServiceContent();
//Initialize services
$this->services = $this->init_services();
$this->services_status = $this->init_service_status();
//Start with the table definition parser
$this->set_parser($this->table_fields);
}
protected function set_parser($table_def)
{
$this->urabe->set_parser(new MysteriousParser($table_def));
}
/**
* Initialize the services for HasamiWrapper
*
* @return array The Restful services supported by this wrapper
*/
protected function init_services()
{
$condition = $this->request_data->build_simple_condition($this->primary_key);
$this->set_selection_filter(is_null($this->request_data->get_filter()) ? null : $this->primary_key . "=" . $this->request_data->get_filter());
return array(
"GET" => new GETService($this),
"PUT" => new PUTService($this),
"POST" => new POSTService($this, $condition),
"DELETE" => new DELETEService($this, $condition)
);
}
/**
* Initialize the service status for the HasamiWrapper
* The default configuration can be set in the Urabe settings
*
* @return array The Restful services supported by this wrapper
*/
protected function init_service_status()
{
return array(
"GET" => KanojoX::$settings->default_GET_status,
"PUT" => KanojoX::$settings->default_PUT_status,
"POST" => KanojoX::$settings->default_POST_status,
"DELETE" => KanojoX::$settings->default_DELETE_status,
);
}
/**
* Gets the service status
*/
public function get_status()
{
$keys = array_keys($this->services_status);
$status = array();
foreach ($keys as &$key)
$status[$key] = ServiceStatus::getName($this->get_service_status($key));
return (object)array(
"Status" => $status,
"Content" => $this->request_data,
"Connection" => $this->urabe->get_connection_data(),
"Table" => array(
"name" => $this->table_name,
"primary_key" => $this->primary_key,
"columns" => $this->table_fields,
"selection_filter" => $this->selection_filter
),
"Actions" => $this->get_available_actions(),
"Filter" => $this->selection_filter,
);
}
/**
* Sets the service desired task for the given request method
*
* @param string $request_method The request method verbose.
* GET, POST, PUT, DELETE, etc.
* @param mixed $task The task name or the callback to execute
* @return void
*/
public function set_service_task($request_method, $task)
{
$service = $this->get_service($request_method);
$service->service_task = $task;
}
/**
* Gets the service response
     * First checks if an action exists on the service; the action name is passed in the GET variable "action".
     * If an action is passed but is not defined, an exception is thrown; if no action is passed the task is taken
     * directly from the request method wrapper.
     *
     * @return UrabeResponse|string The web service response; if the pretty-print variable is found in the GET variables, the result is formatted HTML
**/
public function get_response()
{
try {
$request_method = $this->request_data->method;
$service = $this->get_service($request_method);
if (in_array(VAR_URABE_ACTION, array_keys($this->request_data->get_variables))) {
$actions = $this->get_available_actions();
$action = $this->request_data->get_variables[VAR_URABE_ACTION];
$isSupported = array_key_exists($request_method, $this->services);
//Execute if the action exist otherwise throw an Exception
if (in_array($action, $actions)) //Select urabe action instead of service default action
$service->service_task = CAP_URABE_ACTION . $action;
else {
http_response_code(500);
throw new Exception(sprintf(ERR_INVALID_ACTION, $action));
}
}
$result = $this->get_service_response($service, $request_method);
//Only formats if PP is in the URL
return $this->format_result($result);
} catch (Exception $e) {
throw new Exception(ERR_SERVICE_RESPONSE . $e->getMessage(), $e->getCode());
}
}
/**
     * This function formats the web service result if the pretty-print format is present
     * in the URL. The web service response is returned as an HTML string
*
* @param UrabeResponse $result The web service result response
* @return string|UrabeResponse The web service response
*/
protected function format_result($result)
{
//If pretty print is enable prints result with HTML format
if (in_array(KEY_PRETTY_PRINT, $this->request_data->url_params)) {
if (in_array(KEY_PRETTY_PRINT, $this->request_data->get_variables_names())) {
$style_name = $this->request_data->get_variables[KEY_PRETTY_PRINT];
switch (strtolower($style_name)) {
case "light":
$style = KanojoX::$settings->light_pp_style;
$bg = false;
break;
case "dark":
$style = KanojoX::$settings->dark_pp_style;
$bg = true;
break;
default:
$style = KanojoX::$settings->default_pp_style;
$bg = KanojoX::$settings->default_pp_bg;
break;
}
$result = pretty_print_format($result, $style, $bg);
} else
$result = pretty_print_format($result, KanojoX::$settings->default_pp_style, KanojoX::$settings->default_pp_bg);
}
return $result;
}
/**
     * This function validates access to a service called via a request verb.
     * It can be used to validate a login or group access; this function should be overridden in
     * the child class.
*
* By default returns true
* @return boolean True if the validation access succeed
*/
protected function validate_access()
{
return true;
}
/**
* Gets the web service response
* @param HasamiRESTfulService $service The current web service
* @param string $request_method The request method verbose
* @throws Exception An exception is thrown if an error occurred executing the web request
* @return UrabeResponse The web service response
*/
private function get_service_response($service, $request_method)
{
try {
if (isset($service)) {
$status = $this->get_service_status($request_method);
if ($status == ServiceStatus::AVAILABLE || ($status == ServiceStatus::LOGGED && $this->validate_access())) {
http_response_code(200);
return $service->get_response();
} else if ($status == ServiceStatus::LOGGED) {
KanojoX::$http_error_code = 403;
throw new Exception(sprintf(ERR_SERVICE_RESTRICTED, $request_method));
} else {
KanojoX::$http_error_code = 500;
throw new Exception(sprintf(ERR_VERBOSE_NOT_SUPPORTED, $request_method));
}
} else {
KanojoX::$http_error_code = 500;
throw new Exception(sprintf(ERR_VERBOSE_NOT_SUPPORTED, $request_method));
}
} catch (Exception $e) {
throw new Exception($e->getMessage(), $e->getCode());
}
}
/**
     * This function lists all available web service special actions;
     * all actions are identified by the prefix u_action
* @return array The list of available actions inside an array
*/
private function get_available_actions()
{
$class_name = get_class($this);
$class = new ReflectionClass($class_name);
$methods = $class->getMethods(ReflectionMethod::IS_PUBLIC);
$actions = array();
$uSize = strlen(CAP_URABE_ACTION);
foreach ($methods as &$method) {
if ($method->class == $class_name && substr($method->name, 0, $uSize) == CAP_URABE_ACTION)
array_push($actions, substr($method->name, $uSize));
}
return $actions;
}
}
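/*
 * Illustrative note (editor's sketch): based on get_available_actions and get_response above,
 * and assuming CAP_URABE_ACTION is the "u_action_" prefix and VAR_URABE_ACTION the "action"
 * GET variable (as the docblocks and the tester suggest), a child wrapper can expose a custom
 * action like this; the class, method and URL below are hypothetical:
 *
 *   class MyService extends HasamiWrapper
 *   {
 *       public function u_action_report($data, $urabe)
 *       {
 *           // receives the web service content and the database manager
 *           return (new UrabeResponse())->get_response("report", array());
 *       }
 *   }
 *   // Called as: http://127.0.0.1/mySite/myServiceEndPoint.php?action=report
 */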
?><file_sep>/src/UrabeResponse.php
<?php
/**
* Urabe Response Class
*
* This class encapsulates a service response
* @api Makoto Urabe DB Manager Oracle
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class UrabeResponse
{
/**
* @var mixed[] The query result data
*/
public $result;
/**
* @var string|null The query error if exists
*/
public $error;
/**
* @var string The SQL query
*/
public $query;
/**
* @var bool The query result status
*/
public $query_result;
    /**
     * Gets the result size
     * @return int The result size
     */
public function get_size_result()
{
return sizeof($this->result);
}
/**
* Gets the response message for exception
* @param string $msg The response message
* @param object|null $stack_trace The stack trace result, optional
* @return object The response message
*/
public function get_exception_response($msg, $stack_trace = null)
{
return $this->format_exception_response($msg, $this->error, $stack_trace);
}
/**
* Gets the response message for exception
* @param Exception $exc The executed exception
* @param string|null $stack_trace The stack trace result, optional
* @return object The response message
*/
public function get_simple_exception_response($exc, $stack_trace = null)
{
$error = array(NODE_CODE => $exc->getCode(), NODE_FILE => $exc->getFile(), NODE_LINE => $exc->getLine());
return $this->format_exception_response($exc->getMessage(), $error, $stack_trace);
}
/**
* Formats the Urabe exception response
*
* @param string $msg The exception message
* @param string $error The exception error definition
* @param string $stack_trace If allowed in application settings the error $stack_trace
* @return object The response message
*/
private function format_exception_response($msg, $error, $stack_trace = null)
{
if (KanojoX::$settings->hide_exception_error)
$error = (object)(array(NODE_MSG => $msg, NODE_RESULT => array(), NODE_SIZE => 0, NODE_ERROR => null));
else
$error = (object)(array(NODE_MSG => $msg, NODE_RESULT => array(), NODE_SIZE => 0, NODE_ERROR => $error));
if (!is_null($stack_trace))
$error->{NODE_STACK} = $stack_trace;
return $error;
}
/**
* Gets the response message for a successful request
*
* @param string $msg The response message
* @param array $result The response result
* @param string $sql The SQL statement
* @return object The response message
*/
public function get_response($msg, $result, $sql = null)
{
$this->query = $sql;
$count = sizeof($result);
$response = array(NODE_MSG => $msg, NODE_RESULT => $result, NODE_SIZE => sizeof($result), NODE_ERROR => null);
if (isset($this->query) && KanojoX::$settings->add_query_to_response)
$response[NODE_QUERY] = $this->query;
return (object)($response);
}
/**
* Gets the response message for a successful executed query response
*
     * @param bool $succeed True if the executed query succeeded
* @param int $affected_rows The number of affected rows after a successful query
* @param string $sql The SQL statement
* @return object The response message
*/
public function get_execute_response($succeed, $affected_rows, $sql = null)
{
$this->query = $sql;
$response = array(NODE_SUCCEED => $succeed, NODE_AFF_ROWS => $affected_rows, NODE_RESULT => array(), NODE_ERROR => null);
if (isset($this->query) && KanojoX::$settings->add_query_to_response)
$response[NODE_QUERY] = $this->query;
return (object)($response);
}
}
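/*
 * Illustrative note (editor's sketch): a selection service typically builds its payload with
 * get_response, which yields the {message, result, size, error} structure shown in the README
 * examples. $rows and $sql below are hypothetical values:
 *
 *   $response = new UrabeResponse();
 *   $payload  = $response->get_response("Selection succeed", $rows, $sql);
 *   echo json_encode($payload);
 */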
?><file_sep>/src/DELETEService.php
<?php
include_once "HasamiRESTfulService.php";
/**
* DELETE Service Class
* This class defines a restful service with a request verbose DELETE.
* This method is often used to delete or access protected data from the database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class DELETEService extends HasamiRESTfulService
{
/**
* @var string The delete condition
*/
public $delete_condition;
/**
* __construct
*
* Initialize a new instance of the DELETE Service class.
* A default service task is defined as a callback using the function DELETEService::default_DELETE_action
*
* @param IHasami $wrapper The web service wrapper
* @param string $delete_condition The delete condition
*/
public function __construct($wrapper, $delete_condition = null)
{
$data = $wrapper->get_request_data();
$data->extra->{TAB_NAME} = $wrapper->get_table_name();
$data->extra->{CAP_DELETE} = is_null($delete_condition) ? null : $delete_condition;
$urabe = $wrapper->get_urabe();
parent::__construct($data, $urabe);
$this->wrapper = $wrapper;
$this->service_task = function ($data, $urabe) {
return $this->default_DELETE_action($data, $urabe);
};
}
/**
* Wraps the delete function from urabe
* @param string $table_name The table name.
* @param string $condition The condition to match
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function delete($table_name, $condition)
{
return $this->urabe->delete($table_name, $condition);
}
/**
* Wraps the delete_by_field function from urabe
*
* @param string $table_name The table name.
* @param string $column_name The column name used in the condition.
* @param string $column_value The column value used in the condition.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function delete_by_field($table_name, $column_name, $column_value)
{
return $this->urabe->delete_by_field($table_name, $column_name, $column_value);
}
/**
* Defines the default DELETE action, by default deletes all values that match a condition
* A condition is needed to delete values.
* @param WebServiceContent $data The web service content
* @param Urabe $urabe The database manager
* @throws Exception An Exception is thrown if the response can be processed correctly
* @return UrabeResponse The server response
*/
protected function default_DELETE_action($data, $urabe)
{
try {
$table_name = $data->extra->{TAB_NAME};
//Validate body
$condition = $data->extra->{CAP_DELETE};
            //A condition is obligatory to delete
            if (is_null($condition))
                throw new Exception(sprintf(ERR_MISSING_CONDITION, CAP_DELETE));
$column_name = array_keys($condition)[0];
$column_value = $this->wrapper->format_value($urabe->get_driver(), $column_name, $condition[$column_name]);
//Build delete query
$response = $this->delete_by_field($table_name, $column_name, $column_value);
return $response;
} catch (Exception $e) {
throw new Exception("Error Processing Request, " . $e->getMessage(), $e->getCode());
}
}
}
?><file_sep>/src/resources/EnumErrorMessages_en.php
<?php
/**
* Defines error messages executed by the Enum class
* @version 1.0.0
* @api Makoto Urabe DB Manager Oracle
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
/**
* @var string ERR_ENUM_INVALID_VALUE
* The error message sent when a given value is not found in the ENUM
*/
const ERR_ENUM_INVALID_VALUE = "The value '%s', is not a valid value for the given ENUM '%s'.";
/**
* @var string ERR_ENUM_INVALID_NAME
* The error message sent when a given name is not defined in the ENUM
*/
const ERR_ENUM_INVALID_NAME = "The name '%s', is not defined for the given ENUM '%s'.";
?><file_sep>/testing/WebServiceContentTester.php
<?php
include_once "UrabeTestUtils.php";
/**
* This file test the functionality for the class Web Service Content
*
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
//Settings are loaded when the Kanojo class is created; in this test the connector driver is not important
$kanojo = pick_connector('ORACLE', null);
/**
* Url parameters are set after the .php file
*
*/
$content = new WebServiceContent();
//See the service request content
echo json_encode($content);
?><file_sep>/README.md
# URABE-PHP-CRUD-API
`Urabe` is a CRUD and database transaction manager divided in three layers. The first layer, called `KanojoX`, acts as a connection manager and wraps the most used `php-resources` functions such as `connect`, `close_connection`, `query`, `fetch_assoc` and `error`. Currently the supported drivers are _ORACLE_, _PG_ and _MYSQL_; each driver is associated with a `KanojoX` class: `ORACLEKanojoX`, `PGKanojoX` and `MYSQLKanojoX`. To learn more about the use of `KanojoX` visit the wiki[[1](https://github.com/ANamelessWolf/urabe/wiki/KanojoX,-The-database-connector)].
The second layer is called `Urabe`; this layer is created from a `KanojoX` object and wraps the most common SQL functions, allowing you to work transparently between databases without changing your code. The functions include aliases for selecting, updating, deleting, inserting and other query execution. To learn more about the use of `Urabe` visit the wiki[[2](https://github.com/ANamelessWolf/urabe/wiki/Urabe-Class,-Introduction)].
The last layer is called `HasamiWrapper`; this layer manages the CRUD requests using `Urabe` as the database manager and the `WebServiceContent` class as the request content. The currently supported verbs are `GET`, `POST`, `PUT` and `DELETE`. To learn more about the use of `HasamiWrapper` visit the wiki[[3]()].
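As a rough, editor-added sketch of how the first two layers fit together (the include path, connection values and the `select` call shown here are illustrative assumptions based on the examples below and the project's test utilities):

```php
include_once "urabe/Urabe.php";

// Layer 1: the connector (KanojoX)
$kanojo = new PGKanojoX();
$kanojo->schema = 'mySchema';
$kanojo->init((object)array(
    "host" => "localhost", "user_name" => "postgres", "password" => "...",
    "port" => 5432, "db_name" => "mydb"
));

// Layer 2: the database manager (Urabe), built from the connector
$urabe = new Urabe($kanojo);
$response = $urabe->select("SELECT * FROM mySchema.users");
echo json_encode($response);
```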
## How to use it
Create a new class that extends from `HasamiWrapper`, define the connection data and the table to query.
The connection data is specified in the constructor. Let's say we want to make a service that manages the table users.

```php
include_once "urabe/HasamiWrapper.php";
class MyService extends HasamiWrapper
{
const TABLE_NAME = "users";
public function __construct()
{
$kanojo = new PGKanojoX();
$kanojo->schema = 'mySchema';
$conn = (object)array(
"host"=> 'localhost',
"user_name"=> "postgres",
"password"=>"<PASSWORD>",
"port"=>5432,
"db_name"=>'mydb');
$kanojo->init($conn);
        $full_table_name = $kanojo->schema . "." . self::TABLE_NAME;
        parent::__construct($full_table_name, $kanojo, "id");
}
}
```
In other script lets call it `myServiceEndPoint.php` write the following script. Remember to include path to the class file.
```php
include_once "MyService.php";
$service = new MyService();
$result = $service->get_response();
echo json_encode($result, JSON_PRETTY_PRINT);
```
To access the table we're going to send a web request to the service endpoint; let's say it is located at the path `http://127.0.0.1/mySite/myServiceEndPoint.php`.
### Select data
Now to select data, send a **GET** request; this is a simple request with no parameters needed. The response depends on the configuration defined in the [UrabeSettings.php](https://github.com/ANamelessWolf/urabe/blob/master/src/UrabeSettings.php) file.
**Example request:**
```curl
curl --request GET \ --url 'http://127.0.0.1/mySite/myServiceEndPoint.php'
```
**Example response:**
```json
{
"message": "Selection succeed",
"result": [
{
"id": 2,
"u_name": "mike",
"u_pass": "<PASSWORD>",
},
{
"id": 3,
"u_name": "user",
"u_pass": "ua",
}
],
"size": 2,
"error": null,
}
```
To select a user that matches an id, use the reserved name `filter` as a **GET** variable to allow the response to be filtered by id.
**Example request:**
```curl
curl --request GET \ --url 'http://127.0.0.1/mySite/myServiceEndPoint.php?filter=3'
```
**Example response:**
```json
{
"message": "Selection succeed",
"result": [
{
"id": 3,
"u_name": "user",
"u_pass": "ua",
}
],
"size": 1,
"error": null,
}
```
For update, insert and delete, the wrapper has the verbs **PUT**, **POST** and **DELETE** blocked by default. To unblock them, you can change the `default_*_status` parameters in [UrabeSettings.php](https://github.com/ANamelessWolf/urabe/blob/master/src/UrabeSettings.php) or add the following lines in the constructor.
```php
$this->set_service_status("PUT", ServiceStatus::AVAILABLE);
$this->set_service_status("DELETE", ServiceStatus::AVAILABLE);
$this->set_service_status("POST", ServiceStatus::AVAILABLE);
```
### Insert data
To insert a new user, the request has to be sent as **PUT**, the request body has to be JSON, and it must contain the structure:
```json
"insert_values": {
"columns": []
"values": { }
}
```
Let's insert a new user.
**Example request:**
```curl
curl --request PUT \ --url 'http://127.0.0.1/mySite/myServiceEndPoint.php' \
--header 'Content-Type: application/json' \
--data '{ "insert_values": { "columns": [ "u_name", "u_pass" ], "values": { "u_name": "addedUser", "u_pass": "<PASSWORD>" } } }'
```
**Example response:**
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null,
}
```
### Update data
To update a user, the request has to be sent as **POST**, the request body has to be JSON, and it must contain the structure:
```json
{
"values": { },
"condition": value
}
```
By default the condition is constructed by making the primary key column equal to the passed condition value.
**Example request:** This example updates the user password where the user id equals 3.
```curl
curl --request POST \ --url 'http://127.0.0.1/mySite/myServiceEndPoint.php' \
--header 'Content-Type: application/json' \
--data '{ "values": { "u_pass": "<PASSWORD>" }, "condition": 3 }
```
**Example response:**
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null
}
```
### Delete data
To delete a user, the request has to be sent as **DELETE**, the request body has to be JSON, and it must contain the structure:
```json
{
"condition": value
}
```
By default the condition is constructed by making the primary key column equal to the passed condition value.
**Example request:** This example deletes the user where the user id equals 3.
```curl
curl --request DELETE \ --url 'http://127.0.0.1/mySite/myServiceEndPoint.php' \
--header 'Content-Type: application/json' \
--data '{ "condition": 3 }
```
**Example response:**
```json
{
"succeed": true,
"affected_rows": 1,
"result": [],
"error": null
}
```
This concludes the quick guide to the Urabe API. For advanced requests and custom calls, visit the Wiki `HasamiWrapper` section.
<file_sep>/src/Enum.php
<?php
require_once "resources/Warai.php";
/**
 * This class wraps an ENUM definition; this idea was solved using a post by Brian Cline
 * on the Stack Overflow platform
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
* @example location description https://stackoverflow.com/questions/254514/php-and-enumerations?answertab=active#tab-top
*/
abstract class Enum
{
/**
* @var mixed The enum constants
*/
private static $constCacheArray = null;
/**
* Retrieves the enum constants as an a key value paired array
* Once the constants are retrieve their values are stored in $constCacheArray
* @return array The enum constants inside an array
*/
private static function getConstants()
{
if (self::$constCacheArray == null)
self::$constCacheArray = [];
$calledClass = get_called_class();
if (!array_key_exists($calledClass, self::$constCacheArray)) {
$reflect = new ReflectionClass($calledClass);
self::$constCacheArray[$calledClass] = $reflect->getConstants();
}
return self::$constCacheArray[$calledClass];
}
/**
* Check if the given name is a valid member of
* the ENUM class
*
* @param string $name The name to validate
* @param boolean $strict True if case sensitive will apply to compare the name
* @return boolean True if the name is a valid ENUM member
*/
public static function isValidName($name, $strict = false)
{
$constants = self::getConstants();
if ($strict)
return array_key_exists($name, $constants);
$keys = array_map('strtolower', array_keys($constants));
return in_array(strtolower($name), $keys);
}
/**
* Check if the given value belongs to an ENUM member
*
* @param string $value The value to validate
* @param boolean $strict True if case sensitive will apply to compare the value
* @return boolean True if the value belongs to an ENUM member
*/
public static function isValidValue($value, $strict = true)
{
$values = array_values(self::getConstants());
return in_array($value, $values, $strict);
}
/**
* Gets the member name assigned to a value
*
* @param mixed $value The value to extract its member name
* @param boolean $strict True if case sensitive will apply to compare the value
* @throws Exception An exception is thrown if the value does not belong to an Enum member
* @return string The member name
*/
public static function getName($value, $strict = true)
{
$constants = self::getConstants();
$values = array_values($constants);
if (in_array($value, $values, $strict)) {
$index = array_search($value, $values);
return array_keys($constants)[$index];
} else
throw new Exception(sprintf(ERR_ENUM_INVALID_VALUE, $value, get_called_class()));
}
/**
* Gets the value assigned to an ENUM member
*
* @param string $name The ENUM member name
* @param boolean $strict True if case sensitive will apply to compare the ENUM member name
* @throws Exception An exception is thrown if the name is not a member name
* @return mixed The ENUM member value
*/
    public static function getValue($name, $strict = true)
    {
        $constants = self::getConstants();
        if ($strict) {
            if (array_key_exists($name, $constants))
                return $constants[$name];
        } else {
            $keys = array_map('strtolower', array_keys($constants));
            $index = array_search(strtolower($name), $keys);
            if ($index !== false)
                return array_values($constants)[$index];
        }
        throw new Exception(sprintf(ERR_ENUM_INVALID_NAME, $name, get_called_class()));
    }
}
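/*
 * Illustrative note (editor's sketch): a concrete enumeration is declared by extending this
 * class with constants; the static helpers then validate names and values. The enum below is
 * hypothetical and not part of this project:
 *
 *   class ExampleStatus extends Enum
 *   {
 *       const ACTIVE = 1;
 *       const BLOCKED = 2;
 *   }
 *   ExampleStatus::isValidName("ACTIVE");   // true
 *   ExampleStatus::isValidValue(2);         // true
 *   ExampleStatus::getName(1);              // "ACTIVE"
 */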
?><file_sep>/src/MYSQLKanojoX.php
<?php
include_once "KanojoX.php";
/**
* A MySQL Connection object
*
 * Kanojo means girlfriend in Japanese and this class saves the connection data structure used to connect to
 * a MySQL database.
* @version 1.0.0
* @api Makoto Urabe DB Manager
* @author <NAME> <<EMAIL>>
* @copyright 2015-2020 Nameless Studios
*/
class MYSQLKanojoX extends KanojoX
{
/**
* @var string DEFAULT_CHAR_SET
* The default char set, is UTF8
*/
const DEFAULT_CHAR_SET = 'utf8';
/**
* Initialize a new instance of the connection object
*/
public function __construct()
{
parent::__construct();
$this->db_driver = DBDriver::MYSQL;
}
/**
* Closes a connection
*
* @return bool Returns TRUE on success or FALSE on failure.
*/
public function close()
{
$this->free_result();
if (!$this->connection)
throw new Exception(ERR_NOT_CONNECTED);
return mysqli_close($this->connection);
}
/**
* Frees the memory associated with a result
*
* @return void
*/
public function free_result()
{
foreach ($this->statementsIds as &$statementId)
mysqli_free_result($statementId);
}
/**
* Open a MySQL Database connection
*
* @return stdClass The database connection object
*/
public function connect()
{
try {
$host = $this->host;
$port = $this->port;
$dbname = $this->db_name;
$username = $this->user_name;
$passwd = $this->password;
$this->connection = mysqli_connect($host, $username, $passwd, $dbname, $port);
if ($this->connection)
$this->connection->set_charset(self::DEFAULT_CHAR_SET);
return $this->connection;
} catch (Exception $e) {
return error(sprintf(ERR_BAD_CONNECTION, $e->getMessage()));
}
}
/**
* Get the last error message string of a connection
*
* @param string|null $sql The last executed statement. Can be null
* @param ConnectionError $error If the error exists pass the error
* @return ConnectionError The connection error
*/
public function error($sql, $error = null)
{
if (is_null($error)) {
$error = new ConnectionError();
$error->code = $this->connection->connect_errno;
$error->message = $this->connection->error;
$error->sql = $sql;
return $error;
} else
$this->error = $error;
return $this->error;
}
/**
* Sends a request to execute a prepared statement with given parameters,
* and waits for the result
*
* @param string $sql The SQL Statement
* @param array|null $variables The bind variable values for the statement placeholders; can be null.
* @throws Exception An Exception is raised if the connection is null or executing a bad query
* @return UrabeResponse Returns the service response formatted as an executed response
*/
public function execute($sql, $variables = null)
{
if (!$this->connection)
throw new Exception(ERR_NOT_CONNECTED);
$statement = $this->parse($this->connection, $sql, $variables);
$class = get_class($statement);
if ($class == CLASS_ERR)
throw (!is_null($statement->sql) ? new UrabeSQLException($this->error($sql)) : new Exception($statement->error, $statement->errno));
else {
$ok = $statement->execute();
if ($ok) {
array_push($this->statementsIds, $statement);
return (new UrabeResponse())->get_execute_response(true, $statement->affected_rows, $sql);
} else
throw new UrabeSQLException($this->error($sql));
}
}
/**
* Returns an associative array containing the next result-set row of a
* query. Each array entry corresponds to a column of the row.
*
* @param string $sql The SQL Statement
* @param array $variables The bind variable values for the statement placeholders.
* @throws Exception An Exception is thrown parsing the SQL statement or by connection error
* @return array Returns an associative array.
* */
public function fetch_assoc($sql, $variables = null)
{
$rows = array();
if (!$this->connection)
throw new Exception(ERR_NOT_CONNECTED);
$statement = $this->parse($this->connection, $sql, $variables);
$class = get_class($statement);
if ($class == CLASS_ERR)
throw (!is_null($statement->sql) ? new UrabeSQLException($this->error($sql)) : new Exception($statement->error, $statement->errno));
else {
array_push($this->statementsIds, $statement);
$ok = $statement->execute();
if ($ok) {
$result = $statement->get_result();
while ($row = $result->fetch_assoc())
$this->parser->parse($rows, $row);
} else
throw new UrabeSQLException($this->error($sql));
}
return $rows;
}
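/*
 * Usage sketch (hypothetical database and values; the property names follow connect() above):
 *
 *   $kanojo = new MYSQLKanojoX();
 *   $kanojo->host = 'localhost'; $kanojo->port = 3306;
 *   $kanojo->db_name = 'test'; $kanojo->user_name = 'root'; $kanojo->password = 'secret';
 *   $kanojo->connect();
 *   $rows = $kanojo->fetch_assoc("SELECT * FROM users WHERE id = ?", array(42));
 *
 * bind() below maps each PHP value to a mysqli format character:
 * int => 'i', double => 'd', string => 's', anything else => 'b' (blob).
 */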
/**
* Prepares sql_text using connection and returns the statement identifier,
* which can be used with execute().
* @param mysqli $link MySQL active connection
* @param string $sql The SQL text statement
* @return mysqli_stmt Returns a statement handle on success,
* or a connection Error.
*/
private function parse($link, $sql, $variables = null)
{
if (!$link)
throw new Exception(ERR_NOT_CONNECTED);
$statement = $link->prepare($sql);
if ($statement && isset($variables) && is_array($variables))
$this->bind($statement, $variables);
return $statement ? $statement : $this->error($sql);
}
/**
* Binds PHP variables to the placeholders of a prepared MySQL statement
*
* @param mysqli_stmt $statement The prepared statement
* @param array $variables The bind variable values for the statement placeholders.
* @return bool True on success or FALSE on failure
*/
private function bind($statement, $variables)
{
$format = "";
$parameters = array();
foreach ($variables as &$value) {
if (is_int($value))
$tp = "i";
else if (is_double($value))
$tp = "d";
else if (is_string($value))
$tp = "s";
else
$tp = "b";
$format .= $tp;
}
array_push($parameters, $format);
foreach ($variables as &$value)
array_push($parameters, $value);
return call_user_func_array(array($statement, 'bind_param'), $this->refValues($parameters));
}
/**
* Converts an array in to a referenced values
*
* @param array $arr The array to referenced
* @return array The referenced values
*/
function refValues($arr)
{
if (strnatcmp(phpversion(), '5.3') >= 0) //Reference is required for PHP 5.3+
{
$refs = array();
foreach ($arr as $key => $value)
$refs[$key] = &$arr[$key];
return $refs;
}
return $arr;
}
/**
* Gets the query for selecting the table definition
*
* @param string $table_name The table name
* @return string The table definition selection query
*/
public function get_table_definition_query($table_name)
{
$fields = MYSQL_FIELD_COL_ORDER . ", " . MYSQL_FIELD_COL_NAME . ", " . MYSQL_FIELD_DATA_TP . ", " .
MYSQL_FIELD_CHAR_LENGTH . ", " . MYSQL_FIELD_NUM_PRECISION . ", " . MYSQL_FIELD_NUM_SCALE;
if (isset($this->schema)) {
$schema = $this->schema;
$sql = "SELECT $fields FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = '$table_name' AND `TABLE_SCHEMA` = '$this->db_name'";
} else
$sql = "SELECT $fields FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_NAME` = '$table_name'";
return $sql;
}
/**
* Gets the table definition parser for the MySQL connector
*
* @return array The table definition fields as an array of FieldDefinition
*/
public function get_table_definition_parser()
{
$fields = array(
MYSQL_FIELD_COL_ORDER => new FieldDefinition(0, MYSQL_FIELD_COL_ORDER, PARSE_AS_INT),
MYSQL_FIELD_COL_NAME => new FieldDefinition(1, MYSQL_FIELD_COL_NAME, PARSE_AS_STRING),
MYSQL_FIELD_DATA_TP => new FieldDefinition(2, MYSQL_FIELD_DATA_TP, PARSE_AS_STRING),
MYSQL_FIELD_CHAR_LENGTH => new FieldDefinition(3, MYSQL_FIELD_CHAR_LENGTH, PARSE_AS_INT),
MYSQL_FIELD_NUM_PRECISION => new FieldDefinition(4, MYSQL_FIELD_NUM_PRECISION, PARSE_AS_INT),
MYSQL_FIELD_NUM_SCALE => new FieldDefinition(5, MYSQL_FIELD_NUM_SCALE, PARSE_AS_INT)
);
return $fields;
}
/**
* Gets the table definition mapper for the MySQL connector
*
* @return array The table mapper as KeyValued<String,String> array
*/
public function get_table_definition_mapper()
{
$map = array(
MYSQL_FIELD_COL_ORDER => TAB_DEF_INDEX,
MYSQL_FIELD_COL_NAME => TAB_DEF_NAME,
MYSQL_FIELD_DATA_TP => TAB_DEF_TYPE,
MYSQL_FIELD_CHAR_LENGTH => TAB_DEF_CHAR_LENGTH,
MYSQL_FIELD_NUM_PRECISION => TAB_DEF_NUM_PRECISION,
MYSQL_FIELD_NUM_SCALE => TAB_DEF_NUM_SCALE
);
return $map;
}
}
?>
8c256da516c1cb5b4bf9da82486f5677580340ac | ["Markdown", "PHP"] | 49 | PHP | ANamelessWolf/urabe | 60195da7967cf94e0c5947b21d746a28a43e1e50 | d09a869a6c6f5170f8e4bd3a35a5efca31b29b07 | refs/heads/master |
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
ldap "github.com/go-ldap/ldap/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
)
// LookupUserDN searches for the full DN and groups of a given username
func (l *Config) LookupUserDN(username string) (string, []string, error) {
conn, err := l.LDAP.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err := l.LDAP.LookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
groups, err := l.LDAP.SearchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
// DoesUsernameExist checks if the given username exists in the LDAP directory.
// The given username could be just the short "login" username or the full DN.
// When the username is found, the full DN is returned, otherwise the returned
// string is empty. A missing user is not treated as an error (err == nil); err is non-nil only when the lookup itself fails.
func (l *Config) DoesUsernameExist(username string) (string, error) {
conn, err := l.LDAP.Connect()
if err != nil {
return "", err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return "", err
}
// Check if the passed in username is a valid DN.
parsedUsernameDN, err := ldap.ParseDN(username)
if err != nil {
// Since the passed in username was not a DN, we consider it as a login
// username and attempt to check whether it exists in the directory.
bindDN, err := l.LDAP.LookupUserDN(conn, username)
if err != nil {
if strings.Contains(err.Error(), "not found") {
return "", nil
}
return "", fmt.Errorf("Unable to find user DN: %w", err)
}
return bindDN, nil
}
// Since the username is a valid DN, check that it is under a configured
// base DN in the LDAP directory.
var foundDistName []string
for _, baseDN := range l.LDAP.UserDNSearchBaseDistNames {
// BaseDN should not fail to parse.
baseDNParsed, _ := ldap.ParseDN(baseDN)
if baseDNParsed.AncestorOf(parsedUsernameDN) {
searchRequest := ldap.NewSearchRequest(username, ldap.ScopeBaseObject, ldap.NeverDerefAliases,
0, 0, false, "(objectClass=*)", nil, nil)
searchResult, err := conn.Search(searchRequest)
if err != nil {
// Check if there is no matching result.
// Ref: https://ldap.com/ldap-result-code-reference/
if ldap.IsErrorWithCode(err, 32) {
continue
}
return "", err
}
for _, entry := range searchResult.Entries {
foundDistName = append(foundDistName, entry.DN)
}
}
}
if len(foundDistName) == 1 {
return foundDistName[0], nil
} else if len(foundDistName) > 1 {
// FIXME: This error would happen if the multiple base DNs are given and
// some base DNs are subtrees of other base DNs - we should validate
// and error out in such cases.
return "", fmt.Errorf("found multiple DNs for the given username")
}
return "", nil
}
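// Usage sketch (illustrative names): both a short login name and a full DN are accepted, e.g.
//
//	dn, err := cfg.DoesUsernameExist("alice")
//	dn, err = cfg.DoesUsernameExist("uid=alice,ou=people,dc=example,dc=com")
//
// An empty dn together with a nil err means the user was not found.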
// DoesGroupDNExist checks if the given group DN exists in the LDAP directory.
func (l *Config) DoesGroupDNExist(groupDN string) (bool, error) {
if len(l.LDAP.GroupSearchBaseDistNames) == 0 {
return false, errors.New("no group search Base DNs given")
}
gdn, err := ldap.ParseDN(groupDN)
if err != nil {
return false, fmt.Errorf("Given group DN could not be parsed: %s", err)
}
conn, err := l.LDAP.Connect()
if err != nil {
return false, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return false, err
}
var foundDistName []string
for _, baseDN := range l.LDAP.GroupSearchBaseDistNames {
// BaseDN should not fail to parse.
baseDNParsed, _ := ldap.ParseDN(baseDN)
if baseDNParsed.AncestorOf(gdn) {
searchRequest := ldap.NewSearchRequest(groupDN, ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false, "(objectClass=*)", nil, nil)
searchResult, err := conn.Search(searchRequest)
if err != nil {
// Check if there is no matching result.
// Ref: https://ldap.com/ldap-result-code-reference/
if ldap.IsErrorWithCode(err, 32) {
continue
}
return false, err
}
for _, entry := range searchResult.Entries {
foundDistName = append(foundDistName, entry.DN)
}
}
}
if len(foundDistName) == 1 {
return true, nil
} else if len(foundDistName) > 1 {
// FIXME: This error would happen if the multiple base DNs are given and
// some base DNs are subtrees of other base DNs - we should validate
// and error out in such cases.
return false, fmt.Errorf("found multiple DNs for the given group DN")
}
return false, nil
}
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the
// user and the list of groups.
func (l *Config) Bind(username, password string) (string, []string, error) {
conn, err := l.LDAP.Connect()
if err != nil {
return "", nil, err
}
defer conn.Close()
var bindDN string
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return "", nil, err
}
// Lookup user DN
bindDN, err = l.LDAP.LookupUserDN(conn, username)
if err != nil {
errRet := fmt.Errorf("Unable to find user DN: %w", err)
return "", nil, errRet
}
// Authenticate the user credentials.
err = conn.Bind(bindDN, password)
if err != nil {
errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", bindDN, err)
return "", nil, errRet
}
// Bind to the lookup user account again to perform group search.
if err = l.LDAP.LookupBind(conn); err != nil {
return "", nil, err
}
// User groups lookup.
groups, err := l.LDAP.SearchForUserGroups(conn, username, bindDN)
if err != nil {
return "", nil, err
}
return bindDN, groups, nil
}
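// Usage sketch (illustrative only; assumes a populated Config):
//
//	dn, groups, err := cfg.Bind("alice", "s3cr3t")
//	if err != nil {
//		// bad credentials or directory unreachable
//	}
//	_ = dn     // e.g. "uid=alice,ou=people,dc=example,dc=com"
//	_ = groups // DNs of the groups the user belongs to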
// GetExpiryDuration - return parsed expiry duration.
func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) {
if dsecs == "" {
return l.stsExpiryDuration, nil
}
d, err := strconv.Atoi(dsecs)
if err != nil {
return 0, auth.ErrInvalidDuration
}
dur := time.Duration(d) * time.Second
if dur < minLDAPExpiry || dur > maxLDAPExpiry {
return 0, auth.ErrInvalidDuration
}
return dur, nil
}
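// For example, GetExpiryDuration("3600") yields time.Hour (assuming it falls within the
// configured [minLDAPExpiry, maxLDAPExpiry] bounds), an empty string falls back to the
// configured stsExpiryDuration, and out-of-range values return auth.ErrInvalidDuration.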
// IsLDAPUserDN determines if the given string could be a user DN from LDAP.
func (l Config) IsLDAPUserDN(user string) bool {
for _, baseDN := range l.LDAP.UserDNSearchBaseDistNames {
if strings.HasSuffix(user, ","+baseDN) {
return true
}
}
return false
}
// IsLDAPGroupDN determines if the given string could be a group DN from LDAP.
func (l Config) IsLDAPGroupDN(user string) bool {
for _, baseDN := range l.LDAP.GroupSearchBaseDistNames {
if strings.HasSuffix(user, ","+baseDN) {
return true
}
}
return false
}
// GetNonEligibleUserDistNames - find user accounts (DNs) that are no longer
// present in the LDAP server or do not meet filter criteria anymore
func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, error) {
conn, err := l.LDAP.Connect()
if err != nil {
return nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return nil, err
}
// Evaluate the filter again with generic wildcard instead of specific values
filter := strings.ReplaceAll(l.LDAP.UserDNSearchFilter, "%s", "*")
nonExistentUsers := []string{}
for _, dn := range userDistNames {
searchRequest := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false,
filter,
[]string{}, // only need DN, so pass no attributes here
nil,
)
searchResult, err := conn.Search(searchRequest)
if err != nil {
// Object does not exist error?
if ldap.IsErrorWithCode(err, 32) {
nonExistentUsers = append(nonExistentUsers, dn)
continue
}
return nil, err
}
if len(searchResult.Entries) == 0 {
// DN was not found - this means this user account is
// expired.
nonExistentUsers = append(nonExistentUsers, dn)
}
}
return nonExistentUsers, nil
}
// LookupGroupMemberships - for each DN finds the set of LDAP groups they are a
// member of.
func (l *Config) LookupGroupMemberships(userDistNames []string, userDNToUsernameMap map[string]string) (map[string]set.StringSet, error) {
conn, err := l.LDAP.Connect()
if err != nil {
return nil, err
}
defer conn.Close()
// Bind to the lookup user account
if err = l.LDAP.LookupBind(conn); err != nil {
return nil, err
}
res := make(map[string]set.StringSet, len(userDistNames))
for _, userDistName := range userDistNames {
username := userDNToUsernameMap[userDistName]
groups, err := l.LDAP.SearchForUserGroups(conn, username, userDistName)
if err != nil {
return nil, err
}
res[userDistName] = set.CreateStringSet(groups...)
}
return res, nil
}
<file_sep>#!/bin/bash
trap 'cleanup $LINENO' ERR
# shellcheck disable=SC2120
cleanup() {
MINIO_VERSION=dev docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
rm -s -f
docker volume prune -f
}
verify_checksum_after_heal() {
local sum1
sum1=$(curl -s "$2" | sha256sum)
mc admin heal --json -r "$1" >/dev/null # test after healing
local sum1_heal
sum1_heal=$(curl -s "$2" | sha256sum)
if [ "${sum1_heal}" != "${sum1}" ]; then
echo "mismatch expected ${sum1_heal}, got ${sum1}"
exit 1
fi
}
verify_checksum_mc() {
local expected
expected=$(mc cat "$1" | sha256sum)
local got
got=$(mc cat "$2" | sha256sum)
if [ "${expected}" != "${got}" ]; then
echo "mismatch - expected ${expected}, got ${got}"
exit 1
fi
echo "matches - ${expected}, got ${got}"
}
add_alias() {
for i in $(seq 1 4); do
echo "... attempting to add alias $i"
until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
echo "...waiting... for 5secs" && sleep 5
done
done
echo "Sleeping for nginx"
sleep 20
}
__init__() {
sudo apt install curl -y
export GOPATH=/tmp/gopath
export PATH=${PATH}:${GOPATH}/bin
go install github.com/minio/mc@latest
TAG=minio/minio:dev make docker
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
-f "buildscripts/upgrade-tests/compose.yml" \
up -d --build
add_alias
mc mb minio/minio-test/
mc cp ./minio minio/minio-test/to-read/
mc cp /etc/hosts minio/minio-test/to-read/hosts
mc anonymous set download minio/minio-test
verify_checksum_mc ./minio minio/minio-test/to-read/minio
curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}
main() {
MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
add_alias
verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts
verify_checksum_mc ./minio minio/minio-test/to-read/minio
verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts
cleanup
}
(__init__ "$@" && main "$@")
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"crypto/subtle"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/klauspost/compress/gzhttp"
"github.com/lithammer/shortuuid/v4"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/mux"
"github.com/minio/pkg/bucket/policy"
"github.com/minio/minio/internal/auth"
levent "github.com/minio/minio/internal/config/lambda/event"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Request) (levent.Event, error) {
host := globalLocalNodeName
secure := globalIsTLS
if globalMinioEndpointURL != nil {
host = globalMinioEndpointURL.Host
secure = globalMinioEndpointURL.Scheme == "https"
}
duration := time.Until(cred.Expiration)
if duration > time.Hour || duration < time.Hour {
// Always limit to 1 hour.
duration = time.Hour
}
clnt, err := miniogo.New(host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: secure,
Transport: globalRemoteTargetTransport,
Region: globalSite.Region,
})
if err != nil {
return levent.Event{}, err
}
reqParams := url.Values{}
if partNumberStr := r.Form.Get("partNumber"); partNumberStr != "" {
reqParams.Set("partNumber", partNumberStr)
}
for k := range supportedHeadGetReqParams {
if v := r.Form.Get(k); v != "" {
reqParams.Set(k, v)
}
}
extraHeaders := http.Header{}
if rng := r.Header.Get(xhttp.Range); rng != "" {
extraHeaders.Set(xhttp.Range, r.Header.Get(xhttp.Range))
}
u, err := clnt.PresignHeader(r.Context(), http.MethodGet, bucket, object, duration, reqParams, extraHeaders)
if err != nil {
return levent.Event{}, err
}
token, err := authenticateNode(cred.AccessKey, cred.SecretKey, u.RawQuery)
if err != nil {
return levent.Event{}, err
}
eventData := levent.Event{
GetObjectContext: &levent.GetObjectContext{
InputS3URL: u.String(),
OutputRoute: shortuuid.New(),
OutputToken: token,
},
UserRequest: levent.UserRequest{
URL: r.URL.String(),
Headers: r.Header.Clone(),
},
UserIdentity: levent.Identity{
Type: "IAMUser",
PrincipalID: cred.AccessKey,
AccessKeyID: cred.SecretKey,
},
}
return eventData, nil
}
var statusTextToCode = map[string]int{
"Continue": http.StatusContinue,
"Switching Protocols": http.StatusSwitchingProtocols,
"Processing": http.StatusProcessing,
"Early Hints": http.StatusEarlyHints,
"OK": http.StatusOK,
"Created": http.StatusCreated,
"Accepted": http.StatusAccepted,
"Non-Authoritative Information": http.StatusNonAuthoritativeInfo,
"No Content": http.StatusNoContent,
"Reset Content": http.StatusResetContent,
"Partial Content": http.StatusPartialContent,
"Multi-Status": http.StatusMultiStatus,
"Already Reported": http.StatusAlreadyReported,
"IM Used": http.StatusIMUsed,
"Multiple Choices": http.StatusMultipleChoices,
"Moved Permanently": http.StatusMovedPermanently,
"Found": http.StatusFound,
"See Other": http.StatusSeeOther,
"Not Modified": http.StatusNotModified,
"Use Proxy": http.StatusUseProxy,
"Temporary Redirect": http.StatusTemporaryRedirect,
"Permanent Redirect": http.StatusPermanentRedirect,
"Bad Request": http.StatusBadRequest,
"Unauthorized": http.StatusUnauthorized,
"Payment Required": http.StatusPaymentRequired,
"Forbidden": http.StatusForbidden,
"Not Found": http.StatusNotFound,
"Method Not Allowed": http.StatusMethodNotAllowed,
"Not Acceptable": http.StatusNotAcceptable,
"Proxy Authentication Required": http.StatusProxyAuthRequired,
"Request Timeout": http.StatusRequestTimeout,
"Conflict": http.StatusConflict,
"Gone": http.StatusGone,
"Length Required": http.StatusLengthRequired,
"Precondition Failed": http.StatusPreconditionFailed,
"Request Entity Too Large": http.StatusRequestEntityTooLarge,
"Request URI Too Long": http.StatusRequestURITooLong,
"Unsupported Media Type": http.StatusUnsupportedMediaType,
"Requested Range Not Satisfiable": http.StatusRequestedRangeNotSatisfiable,
"Expectation Failed": http.StatusExpectationFailed,
"I'm a teapot": http.StatusTeapot,
"Misdirected Request": http.StatusMisdirectedRequest,
"Unprocessable Entity": http.StatusUnprocessableEntity,
"Locked": http.StatusLocked,
"Failed Dependency": http.StatusFailedDependency,
"Too Early": http.StatusTooEarly,
"Upgrade Required": http.StatusUpgradeRequired,
"Precondition Required": http.StatusPreconditionRequired,
"Too Many Requests": http.StatusTooManyRequests,
"Request Header Fields Too Large": http.StatusRequestHeaderFieldsTooLarge,
"Unavailable For Legal Reasons": http.StatusUnavailableForLegalReasons,
"Internal Server Error": http.StatusInternalServerError,
"Not Implemented": http.StatusNotImplemented,
"Bad Gateway": http.StatusBadGateway,
"Service Unavailable": http.StatusServiceUnavailable,
"Gateway Timeout": http.StatusGatewayTimeout,
"HTTP Version Not Supported": http.StatusHTTPVersionNotSupported,
"Variant Also Negotiates": http.StatusVariantAlsoNegotiates,
"Insufficient Storage": http.StatusInsufficientStorage,
"Loop Detected": http.StatusLoopDetected,
"Not Extended": http.StatusNotExtended,
"Network Authentication Required": http.StatusNetworkAuthenticationRequired,
}
// StatusCode returns an HTTP status code for the given status text. It returns -1
// if the text is unknown.
func StatusCode(text string) int {
if code, ok := statusTextToCode[text]; ok {
return code
}
return -1
}
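// For example, StatusCode("Not Found") returns 404, while an unrecognized string such as
// StatusCode("No Such Status") returns -1.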
func fwdHeadersToS3(h http.Header, w http.ResponseWriter) {
const trim = "x-amz-fwd-header-"
for k, v := range h {
if strings.HasPrefix(strings.ToLower(k), trim) {
w.Header()[k[len(trim):]] = v
}
}
}
func fwdStatusToAPIError(resp *http.Response) *APIError {
if status := resp.Header.Get(xhttp.AmzFwdStatus); status != "" && StatusCode(status) > -1 {
apiErr := &APIError{
HTTPStatusCode: StatusCode(status),
Description: resp.Header.Get(xhttp.AmzFwdErrorMessage),
Code: resp.Header.Get(xhttp.AmzFwdErrorCode),
}
if apiErr.HTTPStatusCode == http.StatusOK {
return nil
}
return apiErr
}
return nil
}
// GetObjectLamdbaHandler - GET Object with transformed data via lambda functions
// ----------
// This implementation of the GET operation applies lambda functions and returns the
// response generated via the lambda functions. To use this API, you must have READ access
// to the object.
func (api objectAPIHandlers) GetObjectLambdaHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectLambda")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Check for auth type to return S3 compatible error.
cred, _, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.GetObjectAction)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
target, err := globalLambdaTargetList.Lookup(r.Form.Get("lambdaArn"))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
eventData, err := getLambdaEventData(bucket, object, cred, r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
resp, err := target.Send(eventData)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer resp.Body.Close()
if eventData.GetObjectContext.OutputRoute != resp.Header.Get(xhttp.AmzRequestRoute) {
tokenErr := errorCodes.ToAPIErr(ErrInvalidRequest)
tokenErr.Description = "The request route included in the request is invalid"
writeErrorResponse(ctx, w, tokenErr, r.URL)
return
}
if subtle.ConstantTimeCompare([]byte(resp.Header.Get(xhttp.AmzRequestToken)), []byte(eventData.GetObjectContext.OutputToken)) != 1 {
tokenErr := errorCodes.ToAPIErr(ErrInvalidToken)
tokenErr.Description = "The request token included in the request is invalid"
writeErrorResponse(ctx, w, tokenErr, r.URL)
return
}
// Set all the relevant lambda forward headers if found.
fwdHeadersToS3(resp.Header, w)
if apiErr := fwdStatusToAPIError(resp); apiErr != nil {
writeErrorResponse(ctx, w, *apiErr, r.URL)
return
}
if resp.StatusCode != http.StatusOK {
writeErrorResponse(ctx, w, APIError{
Code: "LambdaFunctionError",
HTTPStatusCode: resp.StatusCode,
Description: "unexpected failure reported from lambda function",
}, r.URL)
return
}
if !globalAPIConfig.shouldGzipObjects() {
w.Header().Set(gzhttp.HeaderNoCompression, "true")
}
io.Copy(w, resp.Body)
}
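// Request flow sketch (illustrative ARN and names):
//
//	GET /mybucket/report.csv?lambdaArn=arn:minio:s3-object-lambda::mytransform:webhook
//
// The handler presigns a GET URL for the original object, sends it to the configured
// lambda target, validates the returned route and token, and streams the transformed
// body back to the caller.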
<file_sep>// Copyright (c) 2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"time"
)
type rebalPoolProgress struct {
NumObjects uint64 `json:"objects"`
NumVersions uint64 `json:"versions"`
Bytes uint64 `json:"bytes"`
Bucket string `json:"bucket"`
Object string `json:"object"`
Elapsed time.Duration `json:"elapsed"`
ETA time.Duration `json:"eta"`
}
type rebalancePoolStatus struct {
ID int `json:"id"` // Pool index (zero-based)
Status string `json:"status"` // Active if rebalance is running, empty otherwise
Used float64 `json:"used"` // Percentage used space
Progress rebalPoolProgress `json:"progress,omitempty"` // is empty when rebalance is not running
}
// rebalanceAdminStatus holds rebalance status related information exported to mc, console, etc.
type rebalanceAdminStatus struct {
ID string // identifies the ongoing rebalance operation by a uuid
Pools []rebalancePoolStatus `json:"pools"` // contains all pools, including inactive
StoppedAt time.Time `json:"stoppedAt,omitempty"`
}
func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) {
// Load latest rebalance status
meta := &rebalanceMeta{}
err = meta.load(ctx, z.serverPools[0])
if err != nil {
return r, err
}
// Compute disk usage percentage
si := z.StorageInfo(ctx)
diskStats := make([]struct {
AvailableSpace uint64
TotalSpace uint64
}, len(z.serverPools))
for _, disk := range si.Disks {
// Ignore invalid.
if disk.PoolIndex < 0 || len(diskStats) <= disk.PoolIndex {
// https://github.com/minio/minio/issues/16500
continue
}
diskStats[disk.PoolIndex].AvailableSpace += disk.AvailableSpace
diskStats[disk.PoolIndex].TotalSpace += disk.TotalSpace
}
stopTime := meta.StoppedAt
r = rebalanceAdminStatus{
ID: meta.ID,
StoppedAt: meta.StoppedAt,
Pools: make([]rebalancePoolStatus, len(meta.PoolStats)),
}
for i, ps := range meta.PoolStats {
r.Pools[i] = rebalancePoolStatus{
ID: i,
Status: ps.Info.Status.String(),
Used: float64(diskStats[i].TotalSpace-diskStats[i].AvailableSpace) / float64(diskStats[i].TotalSpace),
}
if !ps.Participating {
continue
}
// for participating pools, total bytes to be rebalanced by this pool is given by,
// pf_c = (f_i + x)/c_i,
// pf_c - percentage free space across pools, f_i - ith pool's free space, c_i - ith pool's capacity
// i.e. x = c_i*pf_c - f_i
totalBytesToRebal := float64(ps.InitCapacity)*meta.PercentFreeGoal - float64(ps.InitFreeSpace)
elapsed := time.Since(ps.Info.StartTime)
eta := time.Duration(totalBytesToRebal * float64(elapsed) / float64(ps.Bytes))
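// Illustrative numbers: with InitCapacity=100GiB, PercentFreeGoal=0.3 and InitFreeSpace=10GiB,
// totalBytesToRebal = 100*0.3 - 10 = 20GiB; if 5GiB were rebalanced over 1h of elapsed time,
// eta = 20 * 1h / 5 = 4h.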
if !ps.Info.EndTime.IsZero() {
stopTime = ps.Info.EndTime
}
if !stopTime.IsZero() { // rebalance is stopped or completed
elapsed = stopTime.Sub(ps.Info.StartTime)
eta = 0
}
r.Pools[i].Progress = rebalPoolProgress{
NumObjects: ps.NumObjects,
NumVersions: ps.NumVersions,
Bytes: ps.Bytes,
Elapsed: elapsed,
ETA: eta,
}
}
return r, nil
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package subnet
import (
"net/http"
"net/url"
"os"
"strings"
"sync"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/env"
xnet "github.com/minio/pkg/net"
)
// DefaultKVS - default KV config for subnet settings
var DefaultKVS = config.KVS{
config.KV{
Key: config.License, // Deprecated Dec 2021
Value: "",
},
config.KV{
Key: config.APIKey,
Value: "",
},
config.KV{
Key: config.Proxy,
Value: "",
},
}
// Config represents the subnet related configuration
type Config struct {
// The subnet license token - Deprecated Dec 2021
License string `json:"license"`
// The subnet api key
APIKey string `json:"apiKey"`
// The HTTP(S) proxy URL to use for connecting to SUBNET
Proxy string `json:"proxy"`
// Transport configured with proxy_url, if one is set.
transport http.RoundTripper
}
var configLock sync.RWMutex
// Registered indicates if cluster is registered or not
func (c *Config) Registered() bool {
configLock.RLock()
defer configLock.RUnlock()
return len(c.APIKey) > 0
}
// ApplyEnv - applies the current subnet config to Console UI specific environment variables.
func (c *Config) ApplyEnv() {
configLock.RLock()
defer configLock.RUnlock()
if c.License != "" {
os.Setenv("CONSOLE_SUBNET_LICENSE", c.License)
}
if c.APIKey != "" {
os.Setenv("CONSOLE_SUBNET_API_KEY", c.APIKey)
}
if c.Proxy != "" {
os.Setenv("CONSOLE_SUBNET_PROXY", c.Proxy)
}
}
// Update - in-place update with new license and registration information.
func (c *Config) Update(ncfg Config) {
configLock.Lock()
defer configLock.Unlock()
c.License = ncfg.License
c.APIKey = ncfg.APIKey
c.Proxy = ncfg.Proxy
c.transport = ncfg.transport
}
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err error) {
if err = config.CheckValidKeys(config.SubnetSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
var proxyURL *xnet.URL
proxy := env.Get(config.EnvMinIOSubnetProxy, kvs.Get(config.Proxy))
if len(proxy) > 0 {
proxyURL, err = xnet.ParseHTTPURL(proxy)
if err != nil {
return cfg, err
}
}
cfg.License = strings.TrimSpace(env.Get(config.EnvMinIOSubnetLicense, kvs.Get(config.License)))
cfg.APIKey = strings.TrimSpace(env.Get(config.EnvMinIOSubnetAPIKey, kvs.Get(config.APIKey)))
cfg.Proxy = proxy
if transport == nil {
// when transport is nil, it means we are just validating the
// inputs not performing any network calls.
return cfg, nil
}
// Make sure to clone the transport before editing the ProxyURL
if proxyURL != nil {
ctransport := transport.(*http.Transport).Clone()
ctransport.Proxy = http.ProxyURL((*url.URL)(proxyURL))
cfg.transport = ctransport
} else {
cfg.transport = transport
}
return cfg, nil
}
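// Usage sketch (illustrative): the environment variables referenced by config.EnvMinIOSubnetAPIKey
// and config.EnvMinIOSubnetProxy override the stored kvs values, and when a proxy is configured the
// supplied transport is cloned with that proxy before being returned in cfg.transport.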
<file_sep>#!/usr/bin/env bash
# shellcheck disable=SC2120
exit_1() {
cleanup
echo "minio1 ============"
cat /tmp/minio1_1.log
echo "minio2 ============"
cat /tmp/minio2_1.log
echo "minio3 ============"
cat /tmp/minio3_1.log
exit 1
}
cleanup() {
echo "Cleaning up instances of MinIO"
pkill minio
pkill -9 minio
rm -rf /tmp/minio{1,2,3}
}
cleanup
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:<KEY>
export MINIO_IDENTITY_OPENID_CONFIG_URL="http://localhost:5556/dex/.well-known/openid-configuration"
export MINIO_IDENTITY_OPENID_CLIENT_ID="minio-client-app"
export MINIO_IDENTITY_OPENID_CLIENT_SECRET="minio-client-app-secret"
export MINIO_IDENTITY_OPENID_CLAIM_NAME="groups"
export MINIO_IDENTITY_OPENID_SCOPES="openid,groups"
export MINIO_IDENTITY_OPENID_REDIRECT_URI="http://127.0.0.1:10000/oauth_callback"
minio server --address ":9001" --console-address ":10000" /tmp/minio1/{1...4} >/tmp/minio1_1.log 2>&1 &
site1_pid=$!
export MINIO_IDENTITY_OPENID_REDIRECT_URI="http://127.0.0.1:11000/oauth_callback"
minio server --address ":9002" --console-address ":11000" /tmp/minio2/{1...4} >/tmp/minio2_1.log 2>&1 &
site2_pid=$!
export MINIO_IDENTITY_OPENID_REDIRECT_URI="http://127.0.0.1:12000/oauth_callback"
minio server --address ":9003" --console-address ":12000" /tmp/minio3/{1...4} >/tmp/minio3_1.log 2>&1 &
site3_pid=$!
if [ ! -f ./mc ]; then
wget -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
sleep 10
export MC_HOST_minio1=http://minio:minio123@localhost:9001
export MC_HOST_minio2=http://minio:minio123@localhost:9002
export MC_HOST_minio3=http://minio:minio123@localhost:9003
./mc admin replicate add minio1 minio2 minio3
./mc admin policy create minio1 projecta ./docs/site-replication/rw.json
sleep 5
./mc admin policy info minio2 projecta >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expecting the command to succeed, exiting.."
exit_1
fi
./mc admin policy info minio3 projecta >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expecting the command to succeed, exiting.."
exit_1
fi
./mc admin policy remove minio3 projecta
sleep 10
./mc admin policy info minio1 projecta
if [ $? -eq 0 ]; then
echo "expecting the command to fail, exiting.."
exit_1
fi
./mc admin policy info minio2 projecta
if [ $? -eq 0 ]; then
echo "expecting the command to fail, exiting.."
exit_1
fi
./mc admin policy create minio1 projecta ./docs/site-replication/rw.json
sleep 5
# Generate STS credential with STS call to minio1
STS_CRED=$(MINIO_ENDPOINT=http://localhost:9001 go run ./docs/site-replication/gen-oidc-sts-cred.go)
MC_HOST_foo=http://${STS_CRED}@localhost:9001 ./mc ls foo
if [ $? -ne 0 ]; then
echo "Expected sts credential to work, exiting.."
exit_1
fi
sleep 2
# Check that the STS credential works on minio2 and minio3.
MC_HOST_foo=http://${STS_CRED}@localhost:9002 ./mc ls foo
if [ $? -ne 0 ]; then
echo "Expected sts credential to work, exiting.."
exit_1
fi
MC_HOST_foo=http://${STS_CRED}@localhost:9003 ./mc ls foo
if [ $? -ne 0 ]; then
echo "Expected sts credential to work, exiting.."
exit_1
fi
STS_ACCESS_KEY=$(echo ${STS_CRED} | cut -d ':' -f 1)
# Create service account for STS user
./mc admin user svcacct add minio2 $STS_ACCESS_KEY --access-key testsvc --secret-key testsvc123
if [ $? -ne 0 ]; then
echo "adding svc account failed, exiting.."
exit_1
fi
sleep 10
./mc admin user svcacct info minio1 testsvc
if [ $? -ne 0 ]; then
echo "svc account not mirrored, exiting.."
exit_1
fi
./mc admin user svcacct info minio2 testsvc
if [ $? -ne 0 ]; then
echo "svc account not mirrored, exiting.."
exit_1
fi
./mc admin user svcacct rm minio1 testsvc
if [ $? -ne 0 ]; then
echo "removing svc account failed, exiting.."
exit_1
fi
sleep 10
./mc admin user svcacct info minio2 testsvc
if [ $? -eq 0 ]; then
echo "svc account found after delete, exiting.."
exit_1
fi
./mc admin user svcacct info minio3 testsvc
if [ $? -eq 0 ]; then
echo "svc account found after delete, exiting.."
exit_1
fi
# create a bucket bucket2 on minio1.
./mc mb minio1/bucket2
./mc mb minio1/newbucket
# copy large upload to newbucket on minio1
truncate -s 17M lrgfile
expected_checksum=$(cat ./lrgfile | md5sum)
./mc cp ./lrgfile minio1/newbucket
sleep 5
./mc stat minio2/newbucket
if [ $? -ne 0 ]; then
echo "expecting bucket to be present. exiting.."
exit_1
fi
./mc stat minio3/newbucket
if [ $? -ne 0 ]; then
echo "expecting bucket to be present. exiting.."
exit_1
fi
./mc cp README.md minio2/newbucket/
sleep 5
./mc stat minio1/newbucket/README.md
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
./mc stat minio3/newbucket/README.md
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
./mc rm minio3/newbucket/README.md
sleep 5
./mc stat minio2/newbucket/README.md
if [ $? -eq 0 ]; then
echo "expected file to be deleted, exiting.."
exit_1
fi
./mc stat minio1/newbucket/README.md
if [ $? -eq 0 ]; then
echo "expected file to be deleted, exiting.."
exit_1
fi
sleep 10
./mc stat minio3/newbucket/lrgfile
if [ $? -ne 0 ]; then
echo "expected object to be present, exiting.."
exit_1
fi
actual_checksum=$(./mc cat minio3/newbucket/lrgfile | md5sum)
if [ "${expected_checksum}" != "${actual_checksum}" ]; then
echo "replication failed on multipart objects expected ${expected_checksum} got ${actual_checksum}"
exit_1
fi
rm ./lrgfile
./mc mb --with-lock minio3/newbucket-olock
sleep 5
enabled_minio2=$(./mc stat --json minio2/newbucket-olock | jq -r .ObjectLock.enabled)
if [ $? -ne 0 ]; then
echo "expected bucket to be mirrored with object-lock but not present, exiting..."
exit_1
fi
if [ "${enabled_minio2}" != "Enabled" ]; then
echo "expected bucket to be mirrored with object-lock enabled, exiting..."
exit_1
fi
enabled_minio1=$(./mc stat --json minio1/newbucket-olock | jq -r .ObjectLock.enabled)
if [ $? -ne 0 ]; then
echo "expected bucket to be mirrored with object-lock but not present, exiting..."
exit_1
fi
if [ "${enabled_minio1}" != "Enabled" ]; then
echo "expected bucket to be mirrored with object-lock enabled, exiting..."
exit_1
fi
# "Test if most recent tag update is replicated"
./mc tag set minio2/newbucket "key=val1"
if [ $? -ne 0 ]; then
echo "expecting tag set to be successful. exiting.."
exit_1
fi
sleep 10
val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
if [ "${val}" != "val1" ]; then
echo "expected bucket tag to have replicated, exiting..."
exit_1
fi
# stop minio1 instance
kill -9 ${site1_pid}
# Update tag on minio2/newbucket when minio1 is down
./mc tag set minio2/newbucket "key=val2"
# create a new bucket on minio2. This should replicate to minio1 after it comes online.
./mc mb minio2/newbucket2
# delete bucket2 on minio2. This should replicate to minio1 after it comes online.
./mc rb minio2/bucket2
# Restart minio1 instance
minio server --address ":9001" --console-address ":10000" /tmp/minio1/{1...4} >/tmp/minio1_1.log 2>&1 &
sleep 200
# Test whether most recent tag update on minio2 is replicated to minio1
val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
if [ "${val}" != "val2" ]; then
echo "expected bucket tag to have replicated, exiting..."
exit_1
fi
# Test if bucket created/deleted when minio1 is down healed
diff -q <(./mc ls minio1) <(./mc ls minio2) 1>/dev/null
if [ $? -ne 0 ]; then
echo "expected 'bucket2' delete and 'newbucket2' creation to have replicated, exiting..."
exit_1
fi
<file_sep># List of metrics reported cluster wide
Each metric includes a label identifying the server that generated it.
These metrics can be obtained from any MinIO server once per collection.
| Name | Description |
|:---------------------------------------------|:----------------------------------------------------------------------------------------------------------------|
| `minio_audit_failed_messages` | Total number of messages that failed to send since start. |
| `minio_audit_target_queue_length` | Number of unsent messages in queue for target. |
| `minio_audit_total_messages` | Total number of messages sent since start. |
| `minio_bucket_objects_size_distribution` | Distribution of object sizes in the bucket, includes label for the bucket name. |
| `minio_bucket_quota_total_bytes` | Total bucket quota size in bytes. |
| `minio_bucket_replication_failed_bytes` | Total number of bytes failed at least once to replicate. |
| `minio_bucket_replication_failed_count` | Total number of objects which failed replication. |
| `minio_bucket_replication_latency_ms` | Replication latency in milliseconds. |
| `minio_bucket_replication_received_bytes` | Total number of bytes replicated to this bucket from another source bucket. |
| `minio_bucket_replication_sent_bytes` | Total number of bytes replicated to the target bucket. |
| `minio_bucket_traffic_received_bytes` | Total number of S3 bytes received for this bucket. |
| `minio_bucket_traffic_sent_bytes` | Total number of S3 bytes sent for this bucket. |
| `minio_bucket_usage_object_total` | Total number of objects. |
| `minio_bucket_usage_total_bytes` | Total bucket size in bytes. |
| `minio_bucket_requests_4xx_errors_total` | Total number of S3 requests with (4xx) errors on a bucket. |
| `minio_bucket_requests_5xx_errors_total` | Total number of S3 requests with (5xx) errors on a bucket. |
| `minio_bucket_requests_inflight_total` | Total number of S3 requests currently in flight on a bucket. |
| `minio_bucket_requests_total` | Total number of S3 requests on a bucket. |
| `minio_bucket_requests_canceled_total` | Total number of S3 requests canceled by the client. |
| `minio_cache_hits_total` | Total number of drive cache hits. |
| `minio_cache_missed_total` | Total number of drive cache misses. |
| `minio_cache_sent_bytes` | Total number of bytes served from cache. |
| `minio_cache_total_bytes` | Total size of cache drive in bytes. |
| `minio_cache_usage_info` | Total percentage cache usage, value of 1 indicates high and 0 low, label level is set as well. |
| `minio_cache_used_bytes` | Current cache usage in bytes. |
| `minio_cluster_capacity_raw_free_bytes` | Total free capacity online in the cluster. |
| `minio_cluster_capacity_raw_total_bytes` | Total capacity online in the cluster. |
| `minio_cluster_capacity_usable_free_bytes` | Total free usable capacity online in the cluster. |
| `minio_cluster_capacity_usable_total_bytes` | Total usable capacity online in the cluster. |
| `minio_cluster_disk_offline_total` | Total drives offline. |
| `minio_cluster_disk_online_total` | Total drives online. |
| `minio_cluster_disk_total` | Total drives. |
| `minio_cluster_ilm_transitioned_bytes` | Total bytes transitioned to a tier. |
| `minio_cluster_ilm_transitioned_objects` | Total number of objects transitioned to a tier. |
| `minio_cluster_ilm_transitioned_versions` | Total number of versions transitioned to a tier. |
| `minio_cluster_kms_online` | Reports whether the KMS is online (1) or offline (0). |
| `minio_cluster_kms_request_error` | Number of KMS requests that failed due to some error. (HTTP 4xx status code). |
| `minio_cluster_kms_request_failure` | Number of KMS requests that failed due to some internal failure. (HTTP 5xx status code). |
| `minio_cluster_kms_request_success` | Number of KMS requests that succeeded. |
| `minio_cluster_kms_uptime` | The time the KMS has been up and running in seconds. |
| `minio_cluster_nodes_offline_total` | Total number of MinIO nodes offline. |
| `minio_cluster_nodes_online_total` | Total number of MinIO nodes online. |
| `minio_heal_objects_errors_total` | Objects for which healing failed in current self healing run. |
| `minio_heal_objects_heal_total` | Objects healed in current self healing run. |
| `minio_heal_objects_total` | Objects scanned in current self healing run. |
| `minio_heal_time_last_activity_nano_seconds` | Time elapsed (in nano seconds) since last self healing activity. |
| `minio_inter_node_traffic_dial_avg_time` | Average time of internodes TCP dial calls. |
| `minio_inter_node_traffic_dial_errors` | Total number of internode TCP dial timeouts and errors. |
| `minio_inter_node_traffic_errors_total` | Total number of failed internode calls. |
| `minio_inter_node_traffic_received_bytes` | Total number of bytes received from other peer nodes. |
| `minio_inter_node_traffic_sent_bytes` | Total number of bytes sent to the other peer nodes. |
| `minio_minio_update_percent` | Total percentage cache usage. |
| `minio_node_disk_free_bytes` | Total storage available on a drive. |
| `minio_node_disk_free_inodes` | Total free inodes. |
| `minio_node_disk_latency_us` | Average last minute latency in ยตs for drive API storage operations. |
| `minio_node_disk_offline_total` | Total drives offline. |
| `minio_node_disk_online_total` | Total drives online. |
| `minio_node_disk_total` | Total drives. |
| `minio_node_disk_total_bytes` | Total storage on a drive. |
| `minio_node_disk_used_bytes` | Total storage used on a drive. |
| `minio_node_file_descriptor_limit_total` | Limit on total number of open file descriptors for the MinIO Server process. |
| `minio_node_file_descriptor_open_total` | Total number of open file descriptors by the MinIO Server process. |
| `minio_node_go_routine_total` | Total number of go routines running. |
| `minio_node_iam_last_sync_duration_millis` | Last successful IAM data sync duration in milliseconds. |
| `minio_node_iam_since_last_sync_millis` | Time (in milliseconds) since last successful IAM data sync. |
| `minio_node_iam_sync_failures` | Number of failed IAM data syncs since server start. |
| `minio_node_iam_sync_successes` | Number of successful IAM data syncs since server start. |
| `minio_node_ilm_expiry_pending_tasks` | Number of pending ILM expiry tasks in the queue. |
| `minio_node_ilm_transition_active_tasks` | Number of active ILM transition tasks. |
| `minio_node_ilm_transition_pending_tasks` | Number of pending ILM transition tasks in the queue. |
| `minio_node_ilm_versions_scanned` | Total number of object versions checked for ilm actions since server start. |
| `minio_node_io_rchar_bytes` | Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar. |
| `minio_node_io_read_bytes` | Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes. |
| `minio_node_io_wchar_bytes` | Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar. |
| `minio_node_io_write_bytes` | Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes. |
| `minio_node_process_cpu_total_seconds` | Total user and system CPU time spent in seconds. |
| `minio_node_process_resident_memory_bytes` | Resident memory size in bytes. |
| `minio_node_process_starttime_seconds` | Start time for MinIO process per node, time in seconds since Unix epoc. |
| `minio_node_process_uptime_seconds` | Uptime for MinIO process per node in seconds. |
| `minio_node_scanner_bucket_scans_finished` | Total number of bucket scans finished since server start. |
| `minio_node_scanner_bucket_scans_started` | Total number of bucket scans started since server start. |
| `minio_node_scanner_directories_scanned` | Total number of directories scanned since server start. |
| `minio_node_scanner_objects_scanned` | Total number of unique objects scanned since server start. |
| `minio_node_scanner_versions_scanned` | Total number of object versions scanned since server start. |
| `minio_node_syscall_read_total` | Total read SysCalls to the kernel. /proc/[pid]/io syscr. |
| `minio_node_syscall_write_total` | Total write SysCalls to the kernel. /proc/[pid]/io syscw. |
| `minio_notify_current_send_in_progress` | Number of concurrent async Send calls active to all targets. |
| `minio_notify_target_queue_length` | Number of unsent notifications in queue for target. |
| `minio_s3_requests_4xx_errors_total` | Total number of S3 requests with (4xx) errors. |
| `minio_s3_requests_5xx_errors_total` | Total number of S3 requests with (5xx) errors. |
| `minio_s3_requests_canceled_total` | Total number of S3 requests canceled by the client. |
| `minio_s3_requests_errors_total` | Total number of S3 requests with (4xx and 5xx) errors. |
| `minio_s3_requests_incoming_total` | Volatile number of total incoming S3 requests. |
| `minio_s3_requests_inflight_total` | Total number of S3 requests currently in flight. |
| `minio_s3_requests_rejected_auth_total` | Total number of S3 requests rejected for auth failure. |
| `minio_s3_requests_rejected_header_total` | Total number of S3 requests rejected for invalid header. |
| `minio_s3_requests_rejected_invalid_total` | Total number of invalid S3 requests. |
| `minio_s3_requests_rejected_timestamp_total` | Total number of S3 requests rejected for invalid timestamp. |
| `minio_s3_requests_total` | Total number of S3 requests. |
| `minio_s3_requests_waiting_total` | Number of S3 requests in the waiting queue. |
| `minio_s3_time_ttfb_seconds_distribution` | Distribution of the time to first byte across API calls. |
| `minio_s3_traffic_received_bytes` | Total number of s3 bytes received. |
| `minio_s3_traffic_sent_bytes` | Total number of s3 bytes sent. |
| `minio_software_commit_info` | Git commit hash for the MinIO release. |
| `minio_software_version_info` | MinIO Release tag for the server. |
| `minio_usage_last_activity_nano_seconds` | Time elapsed (in nano seconds) since last scan activity. |
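For example, a Prometheus scraper or plain `curl` pointed at any node's cluster metrics endpoint (typically `/minio/v2/metrics/cluster`) will return all of the metrics listed above.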
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"time"
"github.com/tinylib/msgp/msgp"
)
// unmarshalV unmarshals with a specific header version.
func (x *xlMetaV2VersionHeader) unmarshalV(v uint8, bts []byte) (o []byte, err error) {
switch v {
case 1:
return x.unmarshalV1(bts)
case xlHeaderVersion:
return x.UnmarshalMsg(bts)
}
return bts, fmt.Errorf("unknown xlHeaderVersion: %d", v)
}
// unmarshalV1 decodes version 1, never released.
func (x *xlMetaV2VersionHeader) unmarshalV1(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 4 {
err = msgp.ArrayError{Wanted: 4, Got: zb0001}
return
}
bts, err = msgp.ReadExactBytes(bts, (x.VersionID)[:])
if err != nil {
err = msgp.WrapError(err, "VersionID")
return
}
x.ModTime, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ModTime")
return
}
{
var zb0002 uint8
zb0002, bts, err = msgp.ReadUint8Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
x.Type = VersionType(zb0002)
}
{
var zb0003 uint8
zb0003, bts, err = msgp.ReadUint8Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Flags")
return
}
x.Flags = xlFlags(zb0003)
}
o = bts
return
}
// unmarshalV unmarshals with a specific metadata version.
func (j *xlMetaV2Version) unmarshalV(v uint8, bts []byte) (o []byte, err error) {
if v > xlMetaVersion {
return bts, fmt.Errorf("unknown xlMetaVersion: %d", v)
}
// Clear omitempty fields:
if j.ObjectV2 != nil && len(j.ObjectV2.PartIndices) > 0 {
j.ObjectV2.PartIndices = j.ObjectV2.PartIndices[:0]
}
o, err = j.UnmarshalMsg(bts)
// Fix inconsistent x-minio-internal-replication-timestamp by converting to UTC.
// Fixed in version 2 or later
if err == nil && j.Type == DeleteType && v < 2 {
if val, ok := j.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp]; ok {
tm, err := time.Parse(time.RFC3339Nano, string(val))
if err == nil {
j.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(tm.UTC().Format(time.RFC3339Nano))
}
}
if val, ok := j.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp]; ok {
tm, err := time.Parse(time.RFC3339Nano, string(val))
if err == nil {
j.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(tm.UTC().Format(time.RFC3339Nano))
}
}
}
// Clean up PartEtags on v1
if j.ObjectV2 != nil {
allEmpty := true
for _, tag := range j.ObjectV2.PartETags {
if len(tag) != 0 {
allEmpty = false
break
}
}
if allEmpty {
j.ObjectV2.PartETags = nil
}
}
return o, err
}
<file_sep>//go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
// This program mocks user interaction against Dex IDP and generates STS
// credentials. It is for MinIO testing purposes only.
//
// Run like:
//
// $ MINIO_ENDPOINT=http://localhost:9000 go run gen-oidc-sts-cred.go
import (
"context"
"fmt"
"log"
"net/http"
"os"
cr "github.com/minio/minio-go/v7/pkg/credentials"
cmd "github.com/minio/minio/cmd"
)
func main() {
ctx := context.Background()
endpoint := os.Getenv("MINIO_ENDPOINT")
if endpoint == "" {
log.Fatalf("Please specify a MinIO server endpoint environment variable like:\n\n\texport MINIO_ENDPOINT=http://localhost:9000")
}
appParams := cmd.OpenIDClientAppParams{
ClientID: "minio-client-app",
ClientSecret: "minio-client-app-secret",
ProviderURL: "http://127.0.0.1:5556/dex",
RedirectURL: "http://127.0.0.1:10000/oauth_callback",
}
oidcToken, err := cmd.MockOpenIDTestUserInteraction(ctx, appParams, "<EMAIL>", "dillon")
if err != nil {
log.Fatalf("Failed to generate OIDC token: %v", err)
}
roleARN := os.Getenv("ROLE_ARN")
webID := cr.STSWebIdentity{
Client: &http.Client{},
STSEndpoint: endpoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: oidcToken,
}, nil
},
RoleARN: roleARN,
}
value, err := webID.Retrieve()
if err != nil {
log.Fatalf("Expected to generate credentials: %v", err)
}
// Print credentials separated by colons:
fmt.Printf("%s:%s:%s\n", value.AccessKeyID, value.SecretAccessKey, value.SessionToken)
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/url"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
)
var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
// initCallhome will start the callhome task in the background.
func initCallhome(ctx context.Context, objAPI ObjectLayer) {
if !globalCallhomeConfig.Enabled() {
return
}
go func() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Leader node (that successfully acquires the lock inside runCallhome)
// will keep performing the callhome. If the leader goes down for some reason,
// the lock will be released and another node will acquire it and take over
// because of this loop.
for {
if !globalCallhomeConfig.Enabled() {
return
}
if !runCallhome(ctx, objAPI) {
// callhome was disabled or context was canceled
return
}
// callhome running on a different node.
// sleep for some time and try again.
duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
time.Sleep(duration)
}
}()
}
func runCallhome(ctx context.Context, objAPI ObjectLayer) bool {
// Make sure only 1 callhome is running on the cluster.
locker := objAPI.NewNSLock(minioMetaBucket, "callhome/runCallhome.lock")
lkctx, err := locker.GetLock(ctx, callhomeLeaderLockTimeout)
if err != nil {
// lock timed out means some other node is the leader,
// cycle back and return 'true'
return true
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur())
defer callhomeTimer.Stop()
for {
if !globalCallhomeConfig.Enabled() {
// Stop the processing as callhome got disabled
return false
}
select {
case <-ctx.Done():
// indicates that we do not need to run callhome anymore
return false
case <-callhomeTimer.C:
if !globalCallhomeConfig.Enabled() {
// Stop the processing as callhome got disabled
return false
}
performCallhome(ctx)
// Reset the timer for next cycle.
callhomeTimer.Reset(globalCallhomeConfig.FrequencyDur())
}
}
}
func performCallhome(ctx context.Context) {
deadline := 10 * time.Second // Default deadline is 10 seconds for callhome
objectAPI := newObjectLayerFn()
if objectAPI == nil {
logger.LogIf(ctx, errors.New("Callhome: object layer not ready"))
return
}
healthCtx, healthCancel := context.WithTimeout(ctx, deadline)
defer healthCancel()
healthInfoCh := make(chan madmin.HealthInfo)
query := url.Values{}
for _, k := range madmin.HealthDataTypesList {
query.Set(string(k), "true")
}
healthInfo := madmin.HealthInfo{
TimeStamp: time.Now().UTC(),
Version: madmin.HealthInfoVersion,
Minio: madmin.MinioHealthInfo{
Info: madmin.MinioInfo{
DeploymentID: globalDeploymentID,
},
},
}
go fetchHealthInfo(healthCtx, objectAPI, &query, healthInfoCh, healthInfo)
for {
select {
case hi, hasMore := <-healthInfoCh:
if !hasMore {
// Received all data. Send to SUBNET and return
err := sendHealthInfo(ctx, healthInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
}
return
}
healthInfo = hi
case <-healthCtx.Done():
return
}
}
}
const (
healthURL = "https://subnet.min.io/api/health/upload"
healthURLDev = "http://localhost:9000/api/health/upload"
)
func sendHealthInfo(ctx context.Context, healthInfo madmin.HealthInfo) error {
url := healthURL
if globalIsCICD {
url = healthURLDev
}
filename := fmt.Sprintf("health_%s.json.gz", UTCNow().Format("20060102150405"))
url += "?filename=" + filename
_, err := globalSubnetConfig.Upload(url, filename, createHealthJSONGzip(ctx, healthInfo))
return err
}
func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []byte {
var b bytes.Buffer
gzWriter := gzip.NewWriter(&b)
header := struct {
Version string `json:"version"`
}{Version: healthInfo.Version}
enc := json.NewEncoder(gzWriter)
if e := enc.Encode(header); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
return nil
}
if e := enc.Encode(healthInfo); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
return nil
}
gzWriter.Flush()
gzWriter.Close()
return b.Bytes()
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package target
import (
"sync"
"sync/atomic"
)
// Inspired from Golang sync.Once but it is only marked
// initialized when the provided function returns nil.
type lazyInit struct {
done uint32
m sync.Mutex
}
func (l *lazyInit) Do(f func() error) error {
if atomic.LoadUint32(&l.done) == 0 {
return l.doSlow(f)
}
return nil
}
func (l *lazyInit) doSlow(f func() error) error {
l.m.Lock()
defer l.m.Unlock()
if atomic.LoadUint32(&l.done) == 0 {
if err := f(); err != nil {
return err
}
// Mark as done only when f() is successful
atomic.StoreUint32(&l.done, 1)
}
return nil
}
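// Usage sketch (illustrative only, not part of this package's API): lazyInit
// lets a caller retry a fallible one-time setup until it first succeeds,
// after which Do becomes a cheap no-op. Assuming a hypothetical dial() helper:
//
//	var initOnce lazyInit
//
//	func send(msg []byte) error {
//		if err := initOnce.Do(dial); err != nil {
//			// initialization failed; a later call to send will retry dial()
//			return err
//		}
//		// ... use the initialized connection ...
//		return nil
//	}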
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logger
import (
"context"
"fmt"
"strings"
"sync"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger/target/http"
"github.com/minio/minio/internal/logger/target/kafka"
"github.com/minio/minio/internal/logger/target/types"
)
// Target is the entity that we will receive
// a single log entry and Send it to the log target
//
// e.g. Send the log to a http server
type Target interface {
String() string
Endpoint() string
Stats() types.TargetStats
Init(ctx context.Context) error
IsOnline(ctx context.Context) bool
Cancel()
Send(ctx context.Context, entry interface{}) error
Type() types.TargetType
}
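// A minimal flow (illustrative): a concrete target is constructed by its own
// package (for example, http.New or kafka.New below), registered through
// AddSystemTarget or one of the Update*Targets helpers, and then receives log
// entries through Send until Cancel is called.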
var (
swapAuditMuRW sync.RWMutex
swapSystemMuRW sync.RWMutex
// systemTargets is the set of enabled loggers.
// Must be immutable at all times.
// Can be swapped to another while holding swapSystemMuRW
systemTargets = []Target{}
// This is always set to represent the /dev/console target
consoleTgt Target
)
// TargetStatus returns status of the target (online|offline)
func TargetStatus(ctx context.Context, h Target) madmin.Status {
if h.IsOnline(ctx) {
return madmin.Status{Status: string(madmin.ItemOnline)}
}
// Previous initialization had failed. Try again.
if e := h.Init(ctx); e == nil {
return madmin.Status{Status: string(madmin.ItemOnline)}
}
return madmin.Status{Status: string(madmin.ItemOffline)}
}
// SystemTargets returns active targets.
// Returned slice may not be modified in any way.
func SystemTargets() []Target {
swapSystemMuRW.RLock()
defer swapSystemMuRW.RUnlock()
res := systemTargets
return res
}
// AuditTargets returns active audit targets.
// Returned slice may not be modified in any way.
func AuditTargets() []Target {
swapAuditMuRW.RLock()
defer swapAuditMuRW.RUnlock()
res := auditTargets
return res
}
// CurrentStats returns the current statistics.
func CurrentStats() map[string]types.TargetStats {
sys := SystemTargets()
audit := AuditTargets()
res := make(map[string]types.TargetStats, len(sys)+len(audit))
cnt := make(map[string]int, len(sys)+len(audit))
// Add system and audit.
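// Stats keys are of the form "sys_<type>_<n>" and "audit_<type>_<n>", where
// <n> disambiguates multiple targets of the same type.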
for _, t := range sys {
key := strings.ToLower(t.Type().String())
n := cnt[key]
cnt[key]++
key = fmt.Sprintf("sys_<KEY>
res[key] = t.Stats()
}
for _, t := range audit {
key := strings.ToLower(t.Type().String())
n := cnt[key]
cnt[key]++
key = fmt.Sprintf("audit_%s_%d", key, n)
res[key] = t.Stats()
}
return res
}
// auditTargets is the list of enabled audit loggers
// Must be immutable at all times.
// Can be swapped to another while holding swapAuditMuRW
var (
auditTargets = []Target{}
)
// AddSystemTarget adds a new logger target to the
// list of enabled loggers
func AddSystemTarget(ctx context.Context, t Target) error {
if err := t.Init(ctx); err != nil {
return err
}
swapSystemMuRW.Lock()
defer swapSystemMuRW.Unlock()
if consoleTgt == nil {
if t.Type() == types.TargetConsole {
consoleTgt = t
}
}
updated := append(make([]Target, 0, len(systemTargets)+1), systemTargets...)
updated = append(updated, t)
systemTargets = updated
return nil
}
func initSystemTargets(ctx context.Context, cfgMap map[string]http.Config) ([]Target, []error) {
tgts := []Target{}
errs := []error{}
for _, l := range cfgMap {
if l.Enabled {
t := http.New(l)
tgts = append(tgts, t)
e := t.Init(ctx)
if e != nil {
errs = append(errs, e)
}
}
}
return tgts, errs
}
func initKafkaTargets(ctx context.Context, cfgMap map[string]kafka.Config) ([]Target, []error) {
tgts := []Target{}
errs := []error{}
for _, l := range cfgMap {
if l.Enabled {
t := kafka.New(l)
tgts = append(tgts, t)
e := t.Init(ctx)
if e != nil {
errs = append(errs, e)
}
}
}
return tgts, errs
}
// Split targets into two groups:
//
// group1 contains all targets of type t
// group2 contains the remaining targets
func splitTargets(targets []Target, t types.TargetType) (group1 []Target, group2 []Target) {
for _, target := range targets {
if target.Type() == t {
group1 = append(group1, target)
} else {
group2 = append(group2, target)
}
}
return
}
func cancelTargets(targets []Target) {
for _, target := range targets {
target.Cancel()
}
}
// UpdateSystemTargets swaps targets with newly loaded ones from the cfg
func UpdateSystemTargets(ctx context.Context, cfg Config) []error {
newTgts, errs := initSystemTargets(ctx, cfg.HTTP)
swapSystemMuRW.Lock()
consoleTargets, otherTargets := splitTargets(systemTargets, types.TargetConsole)
newTgts = append(newTgts, consoleTargets...)
systemTargets = newTgts
swapSystemMuRW.Unlock()
cancelTargets(otherTargets) // cancel running targets
return errs
}
// UpdateAuditWebhookTargets swaps audit webhook targets with newly loaded ones from the cfg
func UpdateAuditWebhookTargets(ctx context.Context, cfg Config) []error {
newWebhookTgts, errs := initSystemTargets(ctx, cfg.AuditWebhook)
swapAuditMuRW.Lock()
// Retain kafka targets
oldWebhookTgts, otherTgts := splitTargets(auditTargets, types.TargetHTTP)
newWebhookTgts = append(newWebhookTgts, otherTgts...)
auditTargets = newWebhookTgts
swapAuditMuRW.Unlock()
cancelTargets(oldWebhookTgts) // cancel running targets
return errs
}
// UpdateAuditKafkaTargets swaps audit kafka targets with newly loaded ones from the cfg
func UpdateAuditKafkaTargets(ctx context.Context, cfg Config) []error {
newKafkaTgts, errs := initKafkaTargets(ctx, cfg.AuditKafka)
swapAuditMuRW.Lock()
// Retain webhook targets
oldKafkaTgts, otherTgts := splitTargets(auditTargets, types.TargetKafka)
newKafkaTgts = append(newKafkaTgts, otherTgts...)
auditTargets = newKafkaTgts
swapAuditMuRW.Unlock()
cancelTargets(oldKafkaTgts) // cancel running targets
return errs
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
type warmBackendMinIO struct {
warmBackendS3
}
var _ WarmBackend = (*warmBackendMinIO)(nil)
func newWarmBackendMinIO(conf madmin.TierMinIO, tier string) (*warmBackendMinIO, error) {
u, err := url.Parse(conf.Endpoint)
if err != nil {
return nil, err
}
creds := credentials.NewStaticV4(conf.AccessKey, conf.SecretKey, "")
getRemoteTierTargetInstanceTransportOnce.Do(func() {
getRemoteTierTargetInstanceTransport = NewHTTPTransportWithTimeout(10 * time.Minute)
})
opts := &minio.Options{
Creds: creds,
Secure: u.Scheme == "https",
Transport: getRemoteTierTargetInstanceTransport,
}
client, err := minio.New(u.Host, opts)
if err != nil {
return nil, err
}
client.SetAppInfo(fmt.Sprintf("minio-tier-%s", tier), ReleaseTag)
core := &minio.Core{Client: client}
return &warmBackendMinIO{
warmBackendS3{
client: client,
core: core,
Bucket: conf.Bucket,
Prefix: strings.TrimSuffix(conf.Prefix, slashSeparator),
},
}, nil
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lambda
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/config/lambda/target"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
xnet "github.com/minio/pkg/net"
)
// ErrTargetsOffline - Indicates single/multiple target failures.
var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets")
// TestSubSysLambdaTargets - tests notification targets of given subsystem
func TestSubSysLambdaTargets(ctx context.Context, cfg config.Config, subSys string, transport *http.Transport) error {
if err := checkValidLambdaKeysForSubSys(subSys, cfg[subSys]); err != nil {
return err
}
targetList, err := fetchSubSysTargets(ctx, cfg, subSys, transport)
if err != nil {
return err
}
for _, target := range targetList {
defer target.Close()
}
for _, target := range targetList {
yes, err := target.IsActive()
if err == nil && !yes {
err = ErrTargetsOffline
}
if err != nil {
return fmt.Errorf("error (%s): %w", target.ID(), err)
}
}
return nil
}
func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, transport *http.Transport) (targets []event.Target, err error) {
if err := checkValidLambdaKeysForSubSys(subSys, cfg[subSys]); err != nil {
return nil, err
}
if subSys == config.LambdaWebhookSubSys {
webhookTargets, err := GetLambdaWebhook(cfg[config.LambdaWebhookSubSys], transport)
if err != nil {
return nil, err
}
for id, args := range webhookTargets {
if !args.Enable {
continue
}
t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport)
if err != nil {
return nil, err
}
targets = append(targets, t)
}
}
return targets, nil
}
// FetchEnabledTargets - Returns a set of configured TargetList
func FetchEnabledTargets(ctx context.Context, cfg config.Config, transport *http.Transport) (*event.TargetList, error) {
targetList := event.NewTargetList()
for _, subSys := range config.LambdaSubSystems.ToSlice() {
targets, err := fetchSubSysTargets(ctx, cfg, subSys, transport)
if err != nil {
return nil, err
}
for _, t := range targets {
if err = targetList.Add(t); err != nil {
return nil, err
}
}
}
return targetList, nil
}
// DefaultLambdaKVS - default lambda notification list of kvs.
var (
DefaultLambdaKVS = map[string]config.KVS{
config.LambdaWebhookSubSys: DefaultWebhookKVS,
}
)
// DefaultWebhookKVS - default KV for webhook config
var (
DefaultWebhookKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOff,
},
config.KV{
Key: target.WebhookEndpoint,
Value: "",
},
config.KV{
Key: target.WebhookAuthToken,
Value: "",
},
config.KV{
Key: target.WebhookClientCert,
Value: "",
},
config.KV{
Key: target.WebhookClientKey,
Value: "",
},
}
)
func checkValidLambdaKeysForSubSys(subSys string, tgt map[string]config.KVS) error {
validKVS, ok := DefaultLambdaKVS[subSys]
if !ok {
return nil
}
for tname, kv := range tgt {
subSysTarget := subSys
if tname != config.Default {
subSysTarget = subSys + config.SubSystemSeparator + tname
}
if v, ok := kv.Lookup(config.Enable); ok && v == config.EnableOn {
if err := config.CheckValidKeys(subSysTarget, kv, validKVS); err != nil {
return err
}
}
}
return nil
}
// GetLambdaWebhook - returns a map of registered notification 'webhook' targets
func GetLambdaWebhook(webhookKVS map[string]config.KVS, transport *http.Transport) (
map[string]target.WebhookArgs, error,
) {
webhookTargets := make(map[string]target.WebhookArgs)
for k, kv := range config.Merge(webhookKVS, target.EnvWebhookEnable, DefaultWebhookKVS) {
enableEnv := target.EnvWebhookEnable
if k != config.Default {
enableEnv = enableEnv + config.Default + k
}
enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
if err != nil {
return nil, err
}
if !enabled {
continue
}
urlEnv := target.EnvWebhookEndpoint
if k != config.Default {
urlEnv = urlEnv + config.Default + k
}
url, err := xnet.ParseHTTPURL(env.Get(urlEnv, kv.Get(target.WebhookEndpoint)))
if err != nil {
return nil, err
}
authEnv := target.EnvWebhookAuthToken
if k != config.Default {
authEnv = authEnv + config.Default + k
}
clientCertEnv := target.EnvWebhookClientCert
if k != config.Default {
clientCertEnv = clientCertEnv + config.Default + k
}
clientKeyEnv := target.EnvWebhookClientKey
if k != config.Default {
clientKeyEnv = clientKeyEnv + config.Default + k
}
webhookArgs := target.WebhookArgs{
Enable: enabled,
Endpoint: *url,
Transport: transport,
AuthToken: env.Get(authEnv, kv.Get(target.WebhookAuthToken)),
ClientCert: env.Get(clientCertEnv, kv.Get(target.WebhookClientCert)),
ClientKey: env.Get(clientKeyEnv, kv.Get(target.WebhookClientKey)),
}
if err = webhookArgs.Validate(); err != nil {
return nil, err
}
webhookTargets[k] = webhookArgs
}
return webhookTargets, nil
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
)
const (
testDefaultTimeout = 30 * time.Second
)
// API suite container for IAM
type TestSuiteIAM struct {
TestSuiteCommon
ServerTypeDescription string
// Flag to turn on tests for etcd backend IAM
withEtcdBackend bool
endpoint string
adm *madmin.AdminClient
client *minio.Client
}
func newTestSuiteIAM(c TestSuiteCommon, withEtcdBackend bool) *TestSuiteIAM {
etcdStr := ""
if withEtcdBackend {
etcdStr = " (with etcd backend)"
}
return &TestSuiteIAM{
TestSuiteCommon: c,
ServerTypeDescription: fmt.Sprintf("%s%s", c.serverType, etcdStr),
withEtcdBackend: withEtcdBackend,
}
}
func (s *TestSuiteIAM) iamSetup(c *check) {
var err error
// strip url scheme from endpoint
s.endpoint = strings.TrimPrefix(s.endPoint, "http://")
if s.secure {
s.endpoint = strings.TrimPrefix(s.endPoint, "https://")
}
s.adm, err = madmin.New(s.endpoint, s.accessKey, s.secretKey, s.secure)
if err != nil {
c.Fatalf("error creating admin client: %v", err)
}
// Set transport, so that TLS is handled correctly.
s.adm.SetCustomTransport(s.TestSuiteCommon.client.Transport)
s.client, err = minio.New(s.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(s.accessKey, s.secretKey, ""),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("error creating minio client: %v", err)
}
}
// List of all IAM test suites (i.e. test server configuration combinations)
// common to tests.
var iamTestSuites = func() []*TestSuiteIAM {
baseTestCases := []TestSuiteCommon{
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
return testCases
}()
const (
EnvTestEtcdBackend = "ETCD_SERVER"
)
func (s *TestSuiteIAM) setUpEtcd(c *check, etcdServer string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
configCmds := []string{
"etcd",
"endpoints=" + etcdServer,
"path_prefix=" + mustGetUUID(),
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup Etcd for tests: %v", err)
}
s.RestartIAMSuite(c)
}
func (s *TestSuiteIAM) SetUpSuite(c *check) {
// If etcd backend is specified and etcd server is not present, the test
// is skipped.
etcdServer := os.Getenv(EnvTestEtcdBackend)
if s.withEtcdBackend && etcdServer == "" {
c.Skip("Skipping etcd backend IAM test as no etcd server is configured.")
}
s.TestSuiteCommon.SetUpSuite(c)
s.iamSetup(c)
if s.withEtcdBackend {
s.setUpEtcd(c, etcdServer)
}
}
func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)
s.iamSetup(c)
}
func (s *TestSuiteIAM) getAdminClient(c *check, accessKey, secretKey, sessionToken string) *madmin.AdminClient {
madmClnt, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, sessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("error creating user admin client: %s", err)
}
madmClnt.SetCustomTransport(s.TestSuiteCommon.client.Transport)
return madmClnt
}
func (s *TestSuiteIAM) getUserClient(c *check, accessKey, secretKey, sessionToken string) *minio.Client {
client, err := minio.New(s.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, sessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("error creating user minio client: %s", err)
}
return client
}
func TestIAMInternalIDPServerSuite(t *testing.T) {
if runtime.GOOS == globalWindowsOSName {
t.Skip("windows is clunky disable these tests")
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
suite := testCase
c := &check{t, testCase.serverType}
suite.SetUpSuite(c)
suite.TestUserCreate(c)
suite.TestUserPolicyEscalationBug(c)
suite.TestPolicyCreate(c)
suite.TestCannedPolicies(c)
suite.TestGroupAddRemove(c)
suite.TestServiceAccountOpsByAdmin(c)
suite.TestServiceAccountOpsByUser(c)
suite.TestAddServiceAccountPerms(c)
suite.TearDownSuite(c)
},
)
}
}
func (s *TestSuiteIAM) TestUserCreate(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
// 1. Create a user.
accessKey, secretKey := mustGenerateCredentials(c)
err := s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 2. Check new user appears in listing
usersMap, err := s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("error listing: %v", err)
}
v, ok := usersMap[accessKey]
if !ok {
c.Fatalf("user not listed: %s", accessKey)
}
c.Assert(v.Status, madmin.AccountEnabled)
// 3. Associate policy and check that user can access
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
if err != nil {
c.Fatalf("unable to set policy: %v", err)
}
client := s.getUserClient(c, accessKey, secretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("user could not create bucket: %v", err)
}
// 3.10. Check that user's password can be updated.
_, newSecretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, newSecretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to update user's secret key: %v", err)
}
// 3.10.1 Check that old password no longer works.
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err == nil {
c.Fatalf("user was unexpectedly able to create bucket with bad password!")
}
// 3.10.2 Check that new password works.
client = s.getUserClient(c, accessKey, newSecretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("user could not create bucket: %v", err)
}
// 4. Check that user can be disabled and verify it.
err = s.adm.SetUserStatus(ctx, accessKey, madmin.AccountDisabled)
if err != nil {
c.Fatalf("could not set user account to disabled")
}
usersMap, err = s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("error listing: %v", err)
}
v, ok = usersMap[accessKey]
if !ok {
c.Fatalf("user was not listed after disabling: %s", accessKey)
}
c.Assert(v.Status, madmin.AccountDisabled)
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err == nil {
c.Fatalf("user account was not disabled!")
}
// 5. Check that user can be deleted and verify it.
err = s.adm.RemoveUser(ctx, accessKey)
if err != nil {
c.Fatalf("user could not be deleted: %v", err)
}
usersMap, err = s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("error listing: %v", err)
}
_, ok = usersMap[accessKey]
if ok {
c.Fatalf("user not deleted: %s", accessKey)
}
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err == nil {
c.Fatalf("user account was not deleted!")
}
}
func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// 2. Create a user, associate policy and verify access
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 2.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, bucket)
// 2.2 create and associate policy to user
policy := "mypolicy-test-user-update"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 2.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
// 2.4 check that user cannot delete the bucket
err = uClient.RemoveBucket(ctx, bucket)
if err == nil || err.Error() != "Access Denied." {
c.Fatalf("bucket was deleted unexpectedly or got unexpected err: %v", err)
}
// 3. Craft a request to update the user's permissions
ep := s.adm.GetEndpointURL()
urlValue := url.Values{}
urlValue.Add("accessKey", accessKey)
u, err := url.Parse(fmt.Sprintf("%s://%s/minio/admin/v3/add-user?%s", ep.Scheme, ep.Host, s3utils.QueryEncode(urlValue)))
if err != nil {
c.Fatalf("unexpected url parse err: %v", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.String(), nil)
if err != nil {
c.Fatalf("unexpected new request err: %v", err)
}
reqBodyArg := madmin.UserInfo{
SecretKey: secretKey,
PolicyName: "consoleAdmin",
Status: madmin.AccountEnabled,
}
buf, err := json.Marshal(reqBodyArg)
if err != nil {
c.Fatalf("unexpected json encode err: %v", err)
}
buf, err = madmin.EncryptData(secretKey, buf)
if err != nil {
c.Fatalf("unexpected encryption err: %v", err)
}
req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = io.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")
// 3.1 Execute the request.
resp, err := s.TestSuiteCommon.client.Do(req)
if err != nil {
c.Fatalf("unexpected request err: %v", err)
}
if resp.StatusCode != 200 {
c.Fatalf("got unexpected response: %#v\n", resp)
}
// 3.2 check that user cannot delete the bucket
err = uClient.RemoveBucket(ctx, bucket)
if err == nil || err.Error() != "Access Denied." {
c.Fatalf("User was able to escalate privileges (Err=%v)!", err)
}
}
func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
// 1. Create a policy
policy1 := "deny-svc"
policy2 := "allow-svc"
policyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Action": [
"admin:CreateServiceAccount"
]
}
]
}`)
newPolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::testbucket/*"
]
}
]
}`)
err := s.adm.AddCannedPolicy(ctx, policy1, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.AddCannedPolicy(ctx, policy2, newPolicyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
// 2. Verify that policy json is validated by server
invalidPolicyBytes := policyBytes[:len(policyBytes)-1]
err = s.adm.AddCannedPolicy(ctx, policy1+"invalid", invalidPolicyBytes)
if err == nil {
c.Fatalf("invalid policy creation success")
}
// 3. Create a user, associate policy and verify access
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 3.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, "testbucket")
// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
admClnt := s.getAdminClient(c, accessKey, secretKey, "")
// 3.3 check user does not have explicit permissions to create service account.
c.mustNotCreateSvcAccount(ctx, accessKey, admClnt)
// 4. Verify the policy appears in listing
ps, err := s.adm.ListCannedPolicies(ctx)
if err != nil {
c.Fatalf("policy list err: %v", err)
}
_, ok := ps[policy1]
if !ok {
c.Fatalf("policy was missing!")
}
// 5. Associate the second policy to the user
err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 5.1 Check that the user can now create a service account implicitly.
c.mustCreateSvcAccount(ctx, accessKey, admClnt)
_, ok = ps[policy2]
if !ok {
c.Fatalf("policy was missing!")
}
err = s.adm.RemoveUser(ctx, accessKey)
if err != nil {
c.Fatalf("user could not be deleted: %v", err)
}
err = s.adm.RemoveCannedPolicy(ctx, policy1)
if err != nil {
c.Fatalf("policy del err: %v", err)
}
err = s.adm.RemoveCannedPolicy(ctx, policy2)
if err != nil {
c.Fatalf("policy del err: %v", err)
}
}
func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// 1. Create a policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
// 2. Verify that policy json is validated by server
invalidPolicyBytes := policyBytes[:len(policyBytes)-1]
err = s.adm.AddCannedPolicy(ctx, policy+"invalid", invalidPolicyBytes)
if err == nil {
c.Fatalf("invalid policy creation success")
}
// 3. Create a user, associate policy and verify access
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 3.1 check that user does not have any access to the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, bucket)
// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 3.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
// 3.4 Check that user cannot exceed their permissions
err = uClient.RemoveBucket(ctx, bucket)
if err == nil {
c.Fatalf("bucket was deleted!")
}
// 4. Verify the policy appears in listing
ps, err := s.adm.ListCannedPolicies(ctx)
if err != nil {
c.Fatalf("policy list err: %v", err)
}
_, ok := ps[policy]
if !ok {
c.Fatalf("policy was missing!")
}
// 5. Check that policy cannot be deleted when attached to a user.
err = s.adm.RemoveCannedPolicy(ctx, policy)
if err == nil {
c.Fatalf("policy could be unexpectedly deleted!")
}
// 6. Delete the user and then delete the policy.
err = s.adm.RemoveUser(ctx, accessKey)
if err != nil {
c.Fatalf("user could not be deleted: %v", err)
}
err = s.adm.RemoveCannedPolicy(ctx, policy)
if err != nil {
c.Fatalf("policy del err: %v", err)
}
}
func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
policies, err := s.adm.ListCannedPolicies(ctx)
if err != nil {
c.Fatalf("unable to list policies: %v", err)
}
defaultPolicies := []string{
"readwrite",
"readonly",
"writeonly",
"diagnostics",
"consoleAdmin",
}
for _, v := range defaultPolicies {
if _, ok := policies[v]; !ok {
c.Fatalf("Failed to find %s in policies list", v)
}
}
bucket := getRandomBucketName()
err = s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
info, err := s.adm.InfoCannedPolicy(ctx, "readwrite")
if err != nil {
c.Fatalf("policy info err: %v", err)
}
infoStr := string(info)
if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
c.Fatalf("policy contains unexpected content!")
}
}
func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 1. Add user to a new group
group := "mygroup"
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
Group: group,
Members: []string{accessKey},
})
if err != nil {
c.Fatalf("Unable to add user to group: %v", err)
}
// 2. Check that user has no access
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, bucket)
// 3. Associate policy to group and check user got access.
err = s.adm.SetPolicy(ctx, policy, group, true)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 3.1 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
// 3.2 Check that user cannot exceed their permissions
err = uClient.RemoveBucket(ctx, bucket)
if err == nil {
c.Fatalf("bucket was deleted!")
}
// 4. List groups and members and verify
groups, err := s.adm.ListGroups(ctx)
if err != nil {
c.Fatalf("group list err: %v", err)
}
if !set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group not present!")
}
groupInfo, err := s.adm.GetGroupDescription(ctx, group)
if err != nil {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Name, group)
c.Assert(set.CreateStringSet(groupInfo.Members...), set.CreateStringSet(accessKey))
c.Assert(groupInfo.Policy, policy)
c.Assert(groupInfo.Status, string(madmin.GroupEnabled))
// 5. Disable/enable the group and verify that user access is revoked/restored.
err = s.adm.SetGroupStatus(ctx, group, madmin.GroupDisabled)
if err != nil {
c.Fatalf("group set status err: %v", err)
}
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
if err != nil {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Status, string(madmin.GroupDisabled))
c.mustNotListObjects(ctx, uClient, bucket)
err = s.adm.SetGroupStatus(ctx, group, madmin.GroupEnabled)
if err != nil {
c.Fatalf("group set status err: %v", err)
}
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
if err != nil {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Status, string(madmin.GroupEnabled))
c.mustListObjects(ctx, uClient, bucket)
// 6. Verify that group cannot be deleted with users.
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
Group: group,
IsRemove: true,
})
if err == nil {
c.Fatalf("group was removed!")
}
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
if err != nil {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Name, group)
// 7. Remove user from group and verify access is revoked.
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
Group: group,
Members: []string{accessKey},
IsRemove: true,
})
if err != nil {
c.Fatalf("group update err: %v", err)
}
c.mustNotListObjects(ctx, uClient, bucket)
// 7.1 verify group still exists
groupInfo, err = s.adm.GetGroupDescription(ctx, group)
if err != nil {
c.Fatalf("group desc err: %v", err)
}
c.Assert(groupInfo.Name, group)
c.Assert(len(groupInfo.Members), 0)
// 8. Delete group and verify
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
Group: group,
IsRemove: true,
})
if err != nil {
c.Fatalf("group update err: %v", err)
}
groups, err = s.adm.ListGroups(ctx)
if err != nil {
c.Fatalf("group list err: %v", err)
}
if set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group still present!")
}
_, err = s.adm.GetGroupDescription(ctx, group)
if err == nil {
c.Fatalf("group appears to exist")
}
}
func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, accessKey, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, accessKey, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, accessKey, cr.AccessKey, false)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, accessKey, bucket)
// 5. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)
// 6. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)
// 7. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// 1. Create a service account for the user
cr := c.mustCreateSvcAccount(ctx, accessKey, s.adm)
// 1.2 Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, s.adm, accessKey, cr.AccessKey)
// 1.3 Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, s.adm, accessKey, cr.AccessKey, false)
// 2. Check that svc account can access the bucket
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 3. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, s.adm, accessKey, bucket)
// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, s.adm, accessKey, bucket)
// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, s.adm, accessKey, bucket)
}
func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
pluginEndpoint := os.Getenv("POLICY_PLUGIN_ENDPOINT")
if pluginEndpoint == "" {
c.Skip("POLICY_PLUGIN_ENDPOINT not given - skipping.")
}
configCmds := []string{
"policy_plugin",
"url=" + pluginEndpoint,
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup access management plugin for tests: %v", err)
}
s.RestartIAMSuite(c)
}
// TestIAM_AMPInternalIDPServerSuite - tests for access management plugin
func TestIAM_AMPInternalIDPServerSuite(t *testing.T) {
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
suite := testCase
c := &check{t, testCase.serverType}
suite.SetUpSuite(c)
defer suite.TearDownSuite(c)
suite.SetUpAccMgmtPlugin(c)
suite.TestAccMgmtPlugin(c)
},
)
}
}
// TestAccMgmtPlugin - this test assumes that the access-management-plugin is
// the same as the example in `docs/iam/access-manager-plugin.go` -
// specifically, it denies only `s3:Put*` operations on non-root accounts.
func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
// 0. Check that owner is able to make-bucket.
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// 1. Create a user.
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// 2. Check new user appears in listing
usersMap, err := s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("error listing: %v", err)
}
v, ok := usersMap[accessKey]
if !ok {
c.Fatalf("user not listed: %s", accessKey)
}
c.Assert(v.Status, madmin.AccountEnabled)
// 3. Check that user is able to make a bucket.
client := s.getUserClient(c, accessKey, secretKey, "")
err = client.MakeBucket(ctx, getRandomBucketName(), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("user not create bucket: %v", err)
}
// 3.1 check user has access to bucket
c.mustListObjects(ctx, client, bucket)
// 3.2 check that user cannot upload an object.
_, err = client.PutObject(ctx, bucket, "objectName", bytes.NewBuffer([]byte("some content")), 12, minio.PutObjectOptions{})
if err == nil {
c.Fatalf("user was able to upload unexpectedly")
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, accessKey, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, accessKey, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, accessKey, cr.AccessKey, false)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// Check that session policies do not apply - as policy enforcement is
// delegated to plugin.
{
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
// Though the attached policy does not allow listing, it will be
// ignored because the plugin allows it.
c.mustListObjects(ctx, svcClient, bucket)
}
// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, accessKey, bucket)
// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, accessKey, bucket)
// 6. Check that service account **can** be created for some other user.
// This is possible because the policy is enforced in the plugin.
c.mustCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
func (c *check) mustCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) madmin.Credentials {
c.Helper()
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
if err != nil {
c.Fatalf("should be able to create a user: %v", err)
}
return madmin.Credentials{
AccessKey: randUser,
SecretKey: randPass,
}
}
func (c *check) mustGetIAMUserInfo(ctx context.Context, admClnt *madmin.AdminClient, accessKey string) madmin.UserInfo {
c.Helper()
ui, err := admClnt.GetUserInfo(ctx, accessKey)
if err != nil {
c.Fatalf("should be able to get user info: %v", err)
}
return ui
}
func (c *check) mustNotCreateIAMUser(ctx context.Context, admClnt *madmin.AdminClient) {
c.Helper()
randUser := mustGetUUID()
randPass := mustGetUUID()
err := admClnt.AddUser(ctx, randUser, randPass)
if err == nil {
c.Fatalf("should not be able to create a user")
}
}
func (c *check) mustCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) madmin.Credentials {
c.Helper()
cr, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
if err != nil {
c.Fatalf("user should be able to create service accounts %s", err)
}
return cr
}
func (c *check) mustNotCreateSvcAccount(ctx context.Context, tgtUser string, admClnt *madmin.AdminClient) {
c.Helper()
_, err := admClnt.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: tgtUser,
})
if err == nil {
c.Fatalf("user was able to add service accounts unexpectedly!")
}
}
func (c *check) mustNotListObjects(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if !ok || v.Err == nil {
c.Fatalf("user was able to list unexpectedly! on %s", bucket)
}
}
func (c *check) mustPutObjectWithTags(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, object, bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{
UserTags: map[string]string{
"security": "public",
"virus": "true",
},
})
if err != nil {
c.Fatalf("user was unable to upload the object: %v", err)
}
}
func (c *check) mustGetObject(ctx context.Context, client *minio.Client, bucket, object string) {
c.Helper()
r, err := client.GetObject(ctx, bucket, object, minio.GetObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
defer r.Close()
_, err = io.Copy(io.Discard, r)
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
}
func (c *check) mustHeadObject(ctx context.Context, client *minio.Client, bucket, object string, tagCount int) {
c.Helper()
oinfo, err := client.StatObject(ctx, bucket, object, minio.StatObjectOptions{})
if err != nil {
c.Fatalf("user was unable to download the object: %v", err)
}
if oinfo.UserTagCount != tagCount {
c.Fatalf("expected tagCount: %d, got %d", tagCount, oinfo.UserTagCount)
}
}
func (c *check) mustListObjects(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
res := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{})
v, ok := <-res
if ok && v.Err != nil {
c.Fatalf("user was unable to list: %v", v.Err)
}
}
func (c *check) mustListBuckets(ctx context.Context, client *minio.Client) {
c.Helper()
_, err := client.ListBuckets(ctx)
if err != nil {
c.Fatalf("user was unable to list buckets: %v", err)
}
}
func (c *check) mustNotDelete(ctx context.Context, client *minio.Client, bucket string, vid string) {
c.Helper()
err := client.RemoveObject(ctx, bucket, "some-object", minio.RemoveObjectOptions{VersionID: vid})
if err == nil {
c.Fatalf("user must not be allowed to delete")
}
err = client.RemoveObject(ctx, bucket, "some-object", minio.RemoveObjectOptions{})
if err != nil {
c.Fatal("user must be able to create delete marker")
}
}
func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
rd, err := client.GetObject(ctx, bucket, "some-object", minio.GetObjectOptions{})
if err != nil {
c.Fatalf("download did not succeed got %#v", err)
}
if _, err = io.Copy(io.Discard, rd); err != nil {
c.Fatalf("download did not succeed got %#v", err)
}
}
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
c.Helper()
versions := []string{}
for i := 0; i < 5; i++ {
ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
}
versions = append(versions, ui.VersionID)
}
return versions
}
func (c *check) mustUpload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
}
}
func (c *check) mustNotUpload(ctx context.Context, client *minio.Client, bucket string) {
c.Helper()
_, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if e, ok := err.(minio.ErrorResponse); ok {
if e.Code == "AccessDenied" {
return
}
}
c.Fatalf("upload did not get an AccessDenied error - got %#v instead", err)
}
func (c *check) assertSvcAccS3Access(ctx context.Context, s *TestSuiteIAM, cr madmin.Credentials, bucket string) {
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)
}
func (c *check) assertSvcAccAppearsInListing(ctx context.Context, madmClient *madmin.AdminClient, parentAK, svcAK string) {
c.Helper()
listResp, err := madmClient.ListServiceAccounts(ctx, parentAK)
if err != nil {
c.Fatalf("unable to list svc accounts: %v", err)
}
var accessKeys []string
for _, item := range listResp.Accounts {
accessKeys = append(accessKeys, item.AccessKey)
}
if !set.CreateStringSet(accessKeys...).Contains(svcAK) {
c.Fatalf("service account did not appear in listing!")
}
}
func (c *check) assertSvcAccInfoQueryable(ctx context.Context, madmClient *madmin.AdminClient, parentAK, svcAK string, skipParentUserCheck bool) {
c.Helper()
infoResp, err := madmClient.InfoServiceAccount(ctx, svcAK)
if err != nil {
c.Fatalf("unable to get svc acc info: %v", err)
}
if !skipParentUserCheck {
c.Assert(infoResp.ParentUser, parentAK)
}
c.Assert(infoResp.AccountStatus, "on")
c.Assert(infoResp.ImpliedPolicy, true)
}
// This test assumes that the policy for `accessKey` allows listing on the given
// bucket. It creates a session policy that restricts listing on the bucket and
// then enables it again in a session policy update call.
func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustNotListObjects(ctx, svcClient, bucket)
// This policy allows listing objects.
newPolicyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewPolicy: newPolicyBytes,
})
if err != nil {
c.Fatalf("unable to update session policy for svc acc: %v", err)
}
c.mustListObjects(ctx, svcClient, bucket)
}
func (c *check) assertSvcAccSecretKeyAndStatusUpdate(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)
_, svcSK2 := mustGenerateCredentials(c)
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewSecretKey: svcSK2,
})
if err != nil {
c.Fatalf("unable to update secret key for svc acc: %v", err)
}
// old creds should not work:
c.mustNotListObjects(ctx, svcClient, bucket)
// new creds work:
svcClient2 := s.getUserClient(c, cr.AccessKey, svcSK2, "")
c.mustListObjects(ctx, svcClient2, bucket)
// update status to disabled
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewStatus: "off",
})
if err != nil {
c.Fatalf("unable to update secret key for svc acc: %v", err)
}
c.mustNotListObjects(ctx, svcClient2, bucket)
}
func (c *check) assertSvcAccDeletion(ctx context.Context, s *TestSuiteIAM, madmClient *madmin.AdminClient, accessKey, bucket string) {
c.Helper()
svcAK, svcSK := mustGenerateCredentials(c)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
TargetUser: accessKey,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
c.mustListObjects(ctx, svcClient, bucket)
err = madmClient.DeleteServiceAccount(ctx, svcAK)
if err != nil {
c.Fatalf("unable to delete svc acc: %v", err)
}
c.mustNotListObjects(ctx, svcClient, bucket)
}
func mustGenerateCredentials(c *check) (string, string) {
c.Helper()
ak, sk, err := auth.GenerateCredentials()
if err != nil {
c.Fatalf("unable to generate credentials: %v", err)
}
return ak, sk
}
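// Illustrative usage sketch (editor's addition, not part of the upstream code):
// the check helpers above are composed inside IAM suite tests roughly as below,
// where `s` is a *TestSuiteIAM, `c` wraps *testing.T, and ctx, bucket, accessKey
// and secretKey are assumed to be set up by the surrounding test.
//
//	client := s.getUserClient(c, accessKey, secretKey, "")
//	c.mustUpload(ctx, client, bucket)
//	c.mustDownload(ctx, client, bucket)
//	c.mustListObjects(ctx, client, bucket)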
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"testing"
)
func prepareErasurePools() (ObjectLayer, []string, error) {
nDisks := 32
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
return nil, nil, err
}
pools := mustGetPoolEndpoints(fsDirs[:16]...)
pools = append(pools, mustGetPoolEndpoints(fsDirs[16:]...)...)
// Everything is fine, should return nil
objLayer, err := newErasureServerPools(context.Background(), pools)
if err != nil {
return nil, nil, err
}
return objLayer, fsDirs, nil
}
func TestPoolMetaValidate(t *testing.T) {
objLayer1, fsDirs, err := prepareErasurePools()
if err != nil {
t.Fatal(err)
}
defer removeRoots(fsDirs)
meta := objLayer1.(*erasureServerPools).poolMeta
pools := objLayer1.(*erasureServerPools).serverPools
objLayer2, fsDirs, err := prepareErasurePools()
if err != nil {
t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
}
defer removeRoots(fsDirs)
newPools := objLayer2.(*erasureServerPools).serverPools
reducedPools := pools[1:]
orderChangePools := []*erasureSets{
pools[1],
pools[0],
}
var nmeta1 poolMeta
nmeta1.Version = poolMetaVersion
nmeta1.Pools = append(nmeta1.Pools, meta.Pools...)
for i, pool := range nmeta1.Pools {
if i == 0 {
nmeta1.Pools[i] = PoolStatus{
CmdLine: pool.CmdLine,
ID: i,
LastUpdate: UTCNow(),
Decommission: &PoolDecommissionInfo{
Complete: true,
},
}
}
}
var nmeta2 poolMeta
nmeta2.Version = poolMetaVersion
nmeta2.Pools = append(nmeta2.Pools, meta.Pools...)
for i, pool := range nmeta2.Pools {
if i == 0 {
nmeta2.Pools[i] = PoolStatus{
CmdLine: pool.CmdLine,
ID: i,
LastUpdate: UTCNow(),
Decommission: &PoolDecommissionInfo{
Complete: false,
},
}
}
}
testCases := []struct {
meta poolMeta
pools []*erasureSets
expectedUpdate bool
expectedErr bool
name string
}{
{
meta: meta,
pools: pools,
name: "Correct",
expectedErr: false,
expectedUpdate: false,
},
{
meta: meta,
pools: newPools,
name: "Correct-Update",
expectedErr: false,
expectedUpdate: true,
},
{
meta: meta,
pools: reducedPools,
name: "Correct-Update-Removed-Pool",
expectedErr: false,
expectedUpdate: true,
},
{
meta: meta,
pools: orderChangePools,
name: "Invalid-Orderchange",
expectedErr: false,
expectedUpdate: true,
},
{
meta: nmeta1,
pools: pools,
name: "Invalid-Completed-Pool-Not-Removed",
expectedErr: true,
expectedUpdate: false,
},
{
meta: nmeta2,
pools: pools,
name: "Correct-Decom-Pending",
expectedErr: false,
expectedUpdate: false,
},
{
meta: nmeta2,
pools: reducedPools,
name: "Invalid-Decom-Pending-Pool-Removal",
expectedErr: false,
expectedUpdate: true,
},
{
meta: nmeta1,
pools: reducedPools,
name: "Correct-Decom-Pool-Removed",
expectedErr: false,
expectedUpdate: true,
},
{
meta: poolMeta{}, // no-pool info available fresh setup.
pools: pools,
name: "Correct-Fresh-Setup",
expectedErr: false,
expectedUpdate: true,
},
{
meta: nmeta2,
pools: orderChangePools,
name: "Invalid-Orderchange-Decom",
expectedErr: false,
expectedUpdate: true,
},
}
t.Parallel()
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
update, err := testCase.meta.validate(testCase.pools)
if testCase.expectedErr {
t.Log(err)
}
if err != nil && !testCase.expectedErr {
t.Errorf("Expected success, but found %s", err)
}
if err == nil && testCase.expectedErr {
t.Error("Expected error, but got `nil`")
}
if update != testCase.expectedUpdate {
t.Errorf("Expected %t, got %t", testCase.expectedUpdate, update)
}
})
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"archive/zip"
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"strings"
"github.com/minio/cli"
"github.com/tinylib/msgp/msgp"
)
func main() {
app := cli.NewApp()
app.Copyright = "MinIO, Inc."
app.Usage = "healing.bin to JSON"
app.HideVersion = true
app.HideHelpCommand = true
app.CustomAppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .VisibleFlags}}[FLAGS]{{end}} [HEALINGBINFILE|INSPECTZIPFILE]
files ending in '.zip' will be searched for '.healing.bin' files recursively and
printed together as a single JSON.
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}
`
app.Flags = []cli.Flag{}
app.Action = func(c *cli.Context) error {
if !c.Args().Present() {
cli.ShowAppHelpAndExit(c, 1) // last argument is exit code
}
ht := make(map[string]map[string]interface{})
file := c.Args().Get(0)
if strings.HasSuffix(file, ".zip") {
var sz int64
f, err := os.Open(file)
if err != nil {
return err
}
if st, err := f.Stat(); err == nil {
sz = st.Size()
}
defer f.Close()
zr, err := zip.NewReader(f, sz)
if err != nil {
return err
}
for _, file := range zr.File {
if !file.FileInfo().IsDir() && strings.HasSuffix(file.Name, ".healing.bin") {
r, err := file.Open()
if err != nil {
return err
}
b, err := io.ReadAll(r)
r.Close()
if err != nil {
return err
}
buf := bytes.NewBuffer(nil)
if _, err = msgp.CopyToJSON(buf, bytes.NewReader(b)); err != nil {
return err
}
dec := json.NewDecoder(buf)
// Use number to preserve integers.
dec.UseNumber()
var htr map[string]interface{}
if err = dec.Decode(&htr); err != nil {
return err
}
ht[file.Name] = htr
}
}
b, err := json.MarshalIndent(ht, "", " ")
if err != nil {
return err
}
fmt.Println(string(b))
return nil
}
b, err := os.ReadFile(file)
if err != nil {
return err
}
buf := bytes.NewBuffer(nil)
if _, err = msgp.CopyToJSON(buf, bytes.NewReader(b)); err != nil {
return err
}
var htr map[string]interface{}
dec := json.NewDecoder(buf)
// Use number to preserve integers.
dec.UseNumber()
if err = dec.Decode(&htr); err != nil {
return err
}
ht[file] = htr
b, err = json.MarshalIndent(ht, "", " ")
if err != nil {
return err
}
fmt.Println(string(b))
return nil
}
err := app.Run(os.Args)
if err != nil {
log.Fatal(err)
}
}
<file_sep>//go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"strings"
)
var (
keyFile string
certFile string
)
func init() {
flag.StringVar(&keyFile, "key-file", "", "Path to TLS cert key file")
flag.StringVar(&certFile, "cert-file", "", "Path to TLS cert file")
}
func writeErrorResponse(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"error": fmt.Sprintf("%v", err),
})
}
type Result struct {
Result bool `json:"result"`
}
func mainHandler(w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponse(w, err)
return
}
reqMap := make(map[string]interface{})
err = json.Unmarshal(body, &reqMap)
if err != nil {
writeErrorResponse(w, err)
return
}
// fmt.Printf("request: %#v\n", reqMap)
m := reqMap["input"].(map[string]interface{})
accountValue := m["account"].(string)
actionValue := m["action"].(string)
// Allow user `minio` to perform any action.
var res Result
if accountValue == "minio" {
res.Result = true
} else {
// All other users may not perform any `s3:Put*` operations.
res.Result = true
if strings.HasPrefix(actionValue, "s3:Put") {
res.Result = false
}
}
fmt.Printf("account: %v | action: %v | allowed: %v\n", accountValue, actionValue, res.Result)
json.NewEncoder(w).Encode(res)
}
func main() {
flag.Parse()
serveFunc := func() error {
return http.ListenAndServe(":8080", nil)
}
if certFile != "" || keyFile != "" {
if certFile == "" || keyFile == "" {
log.Fatal("Please provide both a key file and a cert file to enable TLS.")
}
serveFunc = func() error {
return http.ListenAndServeTLS(":8080", certFile, keyFile, nil)
}
}
http.HandleFunc("/", mainHandler)
log.Print("Listening on :8080")
log.Fatal(serveFunc())
}
<file_sep>#!/bin/sh
#
# If the first argument is not 'minio', prepend it so the container runs the server by default.
if [ "${1}" != "minio" ]; then
if [ -n "${1}" ]; then
set -- minio "$@"
fi
fi
# Switch to the requested user via setpriv; exec will fail if the service cannot run as that user.
docker_switch_user() {
if [ -n "${MINIO_USERNAME}" ] && [ -n "${MINIO_GROUPNAME}" ]; then
if [ -n "${MINIO_UID}" ] && [ -n "${MINIO_GID}" ]; then
groupadd -f -g "$MINIO_GID" "$MINIO_GROUPNAME" &&
useradd -u "$MINIO_UID" -g "$MINIO_GROUPNAME" "$MINIO_USERNAME"
else
groupadd -f "$MINIO_GROUPNAME" &&
useradd -g "$MINIO_GROUPNAME" "$MINIO_USERNAME"
fi
exec setpriv --reuid="${MINIO_USERNAME}" \
--regid="${MINIO_GROUPNAME}" --keep-groups "$@"
else
exec "$@"
fi
}
## Switch to user if applicable.
docker_switch_user "$@"
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/http"
"sync"
"sync/atomic"
xhttp "github.com/minio/minio/internal/http"
"github.com/prometheus/client_golang/prometheus"
)
// ConnStats - Network statistics.
// Counts the total input/output bytes transferred during the server's
// lifetime, broken down into internode, S3 and admin traffic.
type ConnStats struct {
totalInputBytes uint64
totalOutputBytes uint64
s3InputBytes uint64
s3OutputBytes uint64
adminInputBytes uint64
adminOutputBytes uint64
}
// Increase internode total input bytes
func (s *ConnStats) incInputBytes(n int64) {
atomic.AddUint64(&s.totalInputBytes, uint64(n))
}
// Increase internode total output bytes
func (s *ConnStats) incOutputBytes(n int64) {
atomic.AddUint64(&s.totalOutputBytes, uint64(n))
}
// Return internode total input bytes
func (s *ConnStats) getTotalInputBytes() uint64 {
return atomic.LoadUint64(&s.totalInputBytes)
}
// Return internode total output bytes
func (s *ConnStats) getTotalOutputBytes() uint64 {
return atomic.LoadUint64(&s.totalOutputBytes)
}
// Increase S3 total input bytes
func (s *ConnStats) incS3InputBytes(n int64) {
atomic.AddUint64(&s.s3InputBytes, uint64(n))
}
// Increase S3 total output bytes
func (s *ConnStats) incS3OutputBytes(n int64) {
atomic.AddUint64(&s.s3OutputBytes, uint64(n))
}
// Return S3 total input bytes
func (s *ConnStats) getS3InputBytes() uint64 {
return atomic.LoadUint64(&s.s3InputBytes)
}
// Return S3 total output bytes
func (s *ConnStats) getS3OutputBytes() uint64 {
return atomic.LoadUint64(&s.s3OutputBytes)
}
// Increase Admin total input bytes
func (s *ConnStats) incAdminInputBytes(n int64) {
atomic.AddUint64(&s.adminInputBytes, uint64(n))
}
// Increase Admin total output bytes
func (s *ConnStats) incAdminOutputBytes(n int64) {
atomic.AddUint64(&s.adminOutputBytes, uint64(n))
}
// Return Admin total input bytes
func (s *ConnStats) getAdminInputBytes() uint64 {
return atomic.LoadUint64(&s.adminInputBytes)
}
// Return Admin total output bytes
func (s *ConnStats) getAdminOutputBytes() uint64 {
return atomic.LoadUint64(&s.adminOutputBytes)
}
// Return connection stats (total input/output bytes and total s3 input/output bytes)
func (s *ConnStats) toServerConnStats() ServerConnStats {
return ServerConnStats{
TotalInputBytes: s.getTotalInputBytes(), // Traffic internode received
TotalOutputBytes: s.getTotalOutputBytes(), // Traffic internode sent
S3InputBytes: s.getS3InputBytes(), // Traffic S3 received
S3OutputBytes: s.getS3OutputBytes(), // Traffic S3 sent
AdminInputBytes: s.getAdminInputBytes(), // Traffic admin calls received
AdminOutputBytes: s.getAdminOutputBytes(), // Traffic admin calls sent
}
}
// Prepare new ConnStats structure
func newConnStats() *ConnStats {
return &ConnStats{}
}
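// Illustrative usage sketch (editor's addition, not part of the upstream code):
// how the ConnStats counters are typically driven by the HTTP layer and read
// back for reporting. The byte counts used here are arbitrary.
//
//	stats := newConnStats()
//	stats.incS3InputBytes(1024)   // bytes received on an S3 request
//	stats.incS3OutputBytes(2048)  // bytes sent in the S3 response
//	serverStats := stats.toServerConnStats()
//	_ = serverStats.S3InputBytes  // == 1024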
type bucketS3RXTX struct {
s3InputBytes uint64
s3OutputBytes uint64
}
type bucketHTTPAPIStats struct {
currentS3Requests *HTTPAPIStats
totalS3Requests *HTTPAPIStats
totalS34xxErrors *HTTPAPIStats
totalS35xxErrors *HTTPAPIStats
totalS3Canceled *HTTPAPIStats
}
type bucketHTTPStats struct {
sync.RWMutex
httpStats map[string]bucketHTTPAPIStats
}
func newBucketHTTPStats() *bucketHTTPStats {
return &bucketHTTPStats{
httpStats: make(map[string]bucketHTTPAPIStats),
}
}
func (bh *bucketHTTPStats) delete(bucket string) {
bh.Lock()
defer bh.Unlock()
delete(bh.httpStats, bucket)
}
func (bh *bucketHTTPStats) updateHTTPStats(bucket, api string, w *xhttp.ResponseRecorder) {
if bh == nil {
return
}
bh.Lock()
defer bh.Unlock()
hstats, ok := bh.httpStats[bucket]
if !ok {
hstats = bucketHTTPAPIStats{
currentS3Requests: &HTTPAPIStats{},
totalS3Requests: &HTTPAPIStats{},
totalS3Canceled: &HTTPAPIStats{},
totalS34xxErrors: &HTTPAPIStats{},
totalS35xxErrors: &HTTPAPIStats{},
}
}
if w == nil { // when response recorder nil, this is an active request
hstats.currentS3Requests.Inc(api)
bh.httpStats[bucket] = hstats
return
}
hstats.currentS3Requests.Dec(api) // decrement this once we have the response recorder.
hstats.totalS3Requests.Inc(api)
code := w.StatusCode
switch {
case code == 0:
case code == 499:
// 499 means the client closed the request; count it as canceled.
hstats.totalS3Canceled.Inc(api)
case code >= http.StatusBadRequest:
if code >= http.StatusInternalServerError {
hstats.totalS35xxErrors.Inc(api)
} else {
hstats.totalS34xxErrors.Inc(api)
}
}
bh.httpStats[bucket] = hstats
}
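// Illustrative usage sketch (editor's addition, not part of the upstream code):
// updateHTTPStats is called twice per request - once with a nil recorder when
// the request starts, and once with the populated recorder when it completes.
// The variable `recorder` is assumed to be the *xhttp.ResponseRecorder wrapping
// the response writer.
//
//	bh := newBucketHTTPStats()
//	bh.updateHTTPStats("mybucket", "GetObject", nil)      // request in progress
//	// ... handler serves the request, recorder captures the status code ...
//	bh.updateHTTPStats("mybucket", "GetObject", recorder) // request finished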
func (bh *bucketHTTPStats) load(bucket string) bucketHTTPAPIStats {
if bh == nil {
return bucketHTTPAPIStats{
currentS3Requests: &HTTPAPIStats{},
totalS3Requests: &HTTPAPIStats{},
totalS3Canceled: &HTTPAPIStats{},
totalS34xxErrors: &HTTPAPIStats{},
totalS35xxErrors: &HTTPAPIStats{},
}
}
bh.RLock()
defer bh.RUnlock()
val, ok := bh.httpStats[bucket]
if ok {
return val
}
return bucketHTTPAPIStats{
currentS3Requests: &HTTPAPIStats{},
totalS3Requests: &HTTPAPIStats{},
totalS3Canceled: &HTTPAPIStats{},
totalS34xxErrors: &HTTPAPIStats{},
totalS35xxErrors: &HTTPAPIStats{},
}
}
type bucketConnStats struct {
sync.RWMutex
stats map[string]*bucketS3RXTX
}
func newBucketConnStats() *bucketConnStats {
return &bucketConnStats{
stats: make(map[string]*bucketS3RXTX),
}
}
// Increase S3 total input bytes for input bucket
func (s *bucketConnStats) incS3InputBytes(bucket string, n int64) {
s.Lock()
defer s.Unlock()
stats, ok := s.stats[bucket]
if !ok {
stats = &bucketS3RXTX{
s3InputBytes: uint64(n),
}
} else {
stats.s3InputBytes += uint64(n)
}
s.stats[bucket] = stats
}
// Increase S3 total output bytes for input bucket
func (s *bucketConnStats) incS3OutputBytes(bucket string, n int64) {
s.Lock()
defer s.Unlock()
stats, ok := s.stats[bucket]
if !ok {
stats = &bucketS3RXTX{
s3OutputBytes: uint64(n),
}
} else {
stats.s3OutputBytes += uint64(n)
}
s.stats[bucket] = stats
}
// Return S3 total input bytes for input bucket
func (s *bucketConnStats) getS3InputBytes(bucket string) uint64 {
s.RLock()
defer s.RUnlock()
stats := s.stats[bucket]
if stats == nil {
return 0
}
return stats.s3InputBytes
}
// Return S3 total output bytes
func (s *bucketConnStats) getS3OutputBytes(bucket string) uint64 {
s.RLock()
defer s.RUnlock()
stats := s.stats[bucket]
if stats == nil {
return 0
}
return stats.s3OutputBytes
}
// delete metrics once bucket is deleted.
func (s *bucketConnStats) delete(bucket string) {
s.Lock()
defer s.Unlock()
delete(s.stats, bucket)
}
// HTTPAPIStats holds per-API request statistics,
// keyed by the S3 API name.
type HTTPAPIStats struct {
apiStats map[string]int
sync.RWMutex
}
// Inc increments the api stats counter.
func (stats *HTTPAPIStats) Inc(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if stats.apiStats == nil {
stats.apiStats = make(map[string]int)
}
stats.apiStats[api]++
}
// Dec decrements the api stats counter.
func (stats *HTTPAPIStats) Dec(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if val, ok := stats.apiStats[api]; ok && val > 0 {
stats.apiStats[api]--
}
}
// Get returns the current counter on input API string
func (stats *HTTPAPIStats) Get(api string) int {
if stats == nil {
return 0
}
stats.RLock()
defer stats.RUnlock()
val, ok := stats.apiStats[api]
if ok {
return val
}
return 0
}
// Load returns the recorded stats.
func (stats *HTTPAPIStats) Load() map[string]int {
if stats == nil {
return map[string]int{}
}
stats.RLock()
defer stats.RUnlock()
apiStats := make(map[string]int, len(stats.apiStats))
for k, v := range stats.apiStats {
apiStats[k] = v
}
return apiStats
}
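// Illustrative usage sketch (editor's addition, not part of the upstream code):
// HTTPAPIStats is safe for concurrent use; Inc/Dec adjust per-API counters and
// Load returns a copy of the current map.
//
//	var apiStats HTTPAPIStats
//	apiStats.Inc("PutObject")
//	apiStats.Inc("GetObject")
//	apiStats.Dec("PutObject")
//	_ = apiStats.Get("GetObject") // == 1
//	snapshot := apiStats.Load()   // copy, safe to iterate without locking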
// HTTPStats holds statistics information about
// HTTP requests made by all clients
type HTTPStats struct {
s3RequestsInQueue int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
_ int32 // For 64 bits alignment
s3RequestsIncoming uint64
rejectedRequestsAuth uint64
rejectedRequestsTime uint64
rejectedRequestsHeader uint64
rejectedRequestsInvalid uint64
currentS3Requests HTTPAPIStats
totalS3Requests HTTPAPIStats
totalS3Errors HTTPAPIStats
totalS34xxErrors HTTPAPIStats
totalS35xxErrors HTTPAPIStats
totalS3Canceled HTTPAPIStats
}
func (st *HTTPStats) loadRequestsInQueue() int32 {
return atomic.LoadInt32(&st.s3RequestsInQueue)
}
func (st *HTTPStats) addRequestsInQueue(i int32) {
atomic.AddInt32(&st.s3RequestsInQueue, i)
}
func (st *HTTPStats) incS3RequestsIncoming() {
// The counter wraps around to zero on uint64 overflow.
atomic.AddUint64(&st.s3RequestsIncoming, 1)
}
// Converts http stats into struct to be sent back to the client.
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{}
serverStats.S3RequestsIncoming = atomic.SwapUint64(&st.s3RequestsIncoming, 0)
serverStats.S3RequestsInQueue = atomic.LoadInt32(&st.s3RequestsInQueue)
serverStats.TotalS3RejectedAuth = atomic.LoadUint64(&st.rejectedRequestsAuth)
serverStats.TotalS3RejectedTime = atomic.LoadUint64(&st.rejectedRequestsTime)
serverStats.TotalS3RejectedHeader = atomic.LoadUint64(&st.rejectedRequestsHeader)
serverStats.TotalS3RejectedInvalid = atomic.LoadUint64(&st.rejectedRequestsInvalid)
serverStats.CurrentS3Requests = ServerHTTPAPIStats{
APIStats: st.currentS3Requests.Load(),
}
serverStats.TotalS3Requests = ServerHTTPAPIStats{
APIStats: st.totalS3Requests.Load(),
}
serverStats.TotalS3Errors = ServerHTTPAPIStats{
APIStats: st.totalS3Errors.Load(),
}
serverStats.TotalS34xxErrors = ServerHTTPAPIStats{
APIStats: st.totalS34xxErrors.Load(),
}
serverStats.TotalS35xxErrors = ServerHTTPAPIStats{
APIStats: st.totalS35xxErrors.Load(),
}
serverStats.TotalS3Canceled = ServerHTTPAPIStats{
APIStats: st.totalS3Canceled.Load(),
}
return serverStats
}
// Update statistics from http request and response data
func (st *HTTPStats) updateStats(api string, w *xhttp.ResponseRecorder) {
st.totalS3Requests.Inc(api)
// Increment the prometheus http request response histogram with appropriate label
httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(w.TimeToFirstByte.Seconds())
code := w.StatusCode
switch {
case code == 0:
case code == 499:
// 499 means the client closed the request; count it as canceled.
st.totalS3Canceled.Inc(api)
case code >= http.StatusBadRequest:
st.totalS3Errors.Inc(api)
if code >= http.StatusInternalServerError {
st.totalS35xxErrors.Inc(api)
} else {
st.totalS34xxErrors.Inc(api)
}
}
}
// Prepare new HTTPStats structure
func newHTTPStats() *HTTPStats {
return &HTTPStats{}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"net/url"
"strings"
"sync"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/pkg/bucket/policy"
)
// EventNotifier - notifies external systems about events in MinIO.
type EventNotifier struct {
sync.RWMutex
targetList *event.TargetList
targetResCh chan event.TargetIDResult
bucketRulesMap map[string]event.RulesMap
}
// NewEventNotifier - creates new event notification object.
func NewEventNotifier() *EventNotifier {
// targetList and bucketRulesMap are populated later via InitBucketTargets() and AddRulesMap().
return &EventNotifier{
targetList: event.NewTargetList(),
targetResCh: make(chan event.TargetIDResult),
bucketRulesMap: make(map[string]event.RulesMap),
}
}
// GetARNList - returns available ARNs.
func (evnot *EventNotifier) GetARNList(onlyActive bool) []string {
arns := []string{}
if evnot == nil {
return arns
}
region := globalSite.Region
for targetID, target := range evnot.targetList.TargetMap() {
// httpclient target is part of ListenNotification
// which doesn't need to be listed as part of the ARN list
// This list is only meant for external targets, filter
// this out pro-actively.
if !strings.HasPrefix(targetID.ID, "httpclient+") {
if onlyActive {
if _, err := target.IsActive(); err != nil {
continue
}
}
arns = append(arns, targetID.ToARN(region).String())
}
}
return arns
}
// set loads the notification configuration of the given bucket into the EventNotifier.
func (evnot *EventNotifier) set(bucket BucketInfo, meta BucketMetadata) {
config := meta.notificationConfig
if config == nil {
return
}
config.SetRegion(globalSite.Region)
if err := config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
if _, ok := err.(*event.ErrARNNotFound); !ok {
logger.LogIf(GlobalContext, err)
}
}
evnot.AddRulesMap(bucket.Name, config.ToRulesMap())
}
// InitBucketTargets - initializes event notification system from notification.xml of all buckets.
func (evnot *EventNotifier) InitBucketTargets(ctx context.Context, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
if err := evnot.targetList.Add(globalNotifyTargetList.Targets()...); err != nil {
return err
}
go func() {
for res := range evnot.targetResCh {
if res.Err != nil {
reqInfo := &logger.ReqInfo{}
reqInfo.AppendTags("targetID", res.ID.Name)
logger.LogOnceIf(logger.SetReqInfo(GlobalContext, reqInfo), res.Err, res.ID.String())
}
}
}()
return nil
}
// AddRulesMap - adds rules map for bucket name.
func (evnot *EventNotifier) AddRulesMap(bucketName string, rulesMap event.RulesMap) {
evnot.Lock()
defer evnot.Unlock()
rulesMap = rulesMap.Clone()
// Do not add for an empty rulesMap.
if len(rulesMap) == 0 {
delete(evnot.bucketRulesMap, bucketName)
} else {
evnot.bucketRulesMap[bucketName] = rulesMap
}
}
// RemoveRulesMap - removes rules map for bucket name.
func (evnot *EventNotifier) RemoveRulesMap(bucketName string, rulesMap event.RulesMap) {
evnot.Lock()
defer evnot.Unlock()
evnot.bucketRulesMap[bucketName].Remove(rulesMap)
if len(evnot.bucketRulesMap[bucketName]) == 0 {
delete(evnot.bucketRulesMap, bucketName)
}
}
// ConfiguredTargetIDs - returns the list of configured target IDs.
func (evnot *EventNotifier) ConfiguredTargetIDs() []event.TargetID {
if evnot == nil {
return nil
}
evnot.RLock()
defer evnot.RUnlock()
var targetIDs []event.TargetID
for _, rmap := range evnot.bucketRulesMap {
for _, rules := range rmap {
for _, targetSet := range rules {
for id := range targetSet {
targetIDs = append(targetIDs, id)
}
}
}
}
return targetIDs
}
// RemoveNotification - removes all notification configuration for bucket name.
func (evnot *EventNotifier) RemoveNotification(bucketName string) {
evnot.Lock()
defer evnot.Unlock()
delete(evnot.bucketRulesMap, bucketName)
}
// RemoveAllBucketTargets - closes and removes all notification targets.
func (evnot *EventNotifier) RemoveAllBucketTargets() {
evnot.Lock()
defer evnot.Unlock()
targetIDSet := event.NewTargetIDSet()
for k := range evnot.targetList.TargetMap() {
targetIDSet[k] = struct{}{}
}
evnot.targetList.Remove(targetIDSet)
}
// Send - sends the event to all registered notification targets
func (evnot *EventNotifier) Send(args eventArgs) {
evnot.RLock()
targetIDSet := evnot.bucketRulesMap[args.BucketName].Match(args.EventName, args.Object.Name)
evnot.RUnlock()
if len(targetIDSet) == 0 {
return
}
// If MINIO_API_SYNC_EVENTS is set, send events synchronously.
evnot.targetList.Send(args.ToEvent(true), targetIDSet, evnot.targetResCh, globalAPIConfig.isSyncEventsEnabled())
}
type eventArgs struct {
EventName event.Name
BucketName string
Object ObjectInfo
ReqParams map[string]string
RespElements map[string]string
Host string
UserAgent string
}
// ToEvent - converts to notification event.
func (args eventArgs) ToEvent(escape bool) event.Event {
eventTime := UTCNow()
uniqueID := fmt.Sprintf("%X", eventTime.UnixNano())
respElements := map[string]string{
"x-amz-request-id": args.RespElements["requestId"],
"x-amz-id-2": args.RespElements["nodeId"],
"x-minio-origin-endpoint": func() string {
if globalMinioEndpoint != "" {
return globalMinioEndpoint
}
return getAPIEndpoints()[0]
}(), // MinIO specific custom elements.
}
// Add deployment as part of response elements.
respElements["x-minio-deployment-id"] = globalDeploymentID
if args.RespElements["content-length"] != "" {
respElements["content-length"] = args.RespElements["content-length"]
}
keyName := args.Object.Name
if escape {
keyName = url.QueryEscape(args.Object.Name)
}
newEvent := event.Event{
EventVersion: "2.0",
EventSource: "minio:s3",
AwsRegion: args.ReqParams["region"],
EventTime: eventTime.Format(event.AMZTimeFormat),
EventName: args.EventName,
UserIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
RequestParameters: args.ReqParams,
ResponseElements: respElements,
S3: event.Metadata{
SchemaVersion: "1.0",
ConfigurationID: "Config",
Bucket: event.Bucket{
Name: args.BucketName,
OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
ARN: policy.ResourceARNPrefix + args.BucketName,
},
Object: event.Object{
Key: keyName,
VersionID: args.Object.VersionID,
Sequencer: uniqueID,
},
},
Source: event.Source{
Host: args.Host,
UserAgent: args.UserAgent,
},
}
if args.EventName != event.ObjectRemovedDelete && args.EventName != event.ObjectRemovedDeleteMarkerCreated {
newEvent.S3.Object.ETag = args.Object.ETag
newEvent.S3.Object.Size = args.Object.Size
newEvent.S3.Object.ContentType = args.Object.ContentType
newEvent.S3.Object.UserMetadata = make(map[string]string, len(args.Object.UserDefined))
for k, v := range args.Object.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
newEvent.S3.Object.UserMetadata[k] = v
}
}
return newEvent
}
func sendEvent(args eventArgs) {
// avoid generating a notification for REPLICA creation event.
if _, ok := args.ReqParams[xhttp.MinIOSourceReplicationRequest]; ok {
return
}
args.Object.Size, _ = args.Object.GetActualSize()
// remove sensitive encryption entries in metadata.
crypto.RemoveSensitiveEntries(args.Object.UserDefined)
crypto.RemoveInternalEntries(args.Object.UserDefined)
if globalHTTPListen.NumSubscribers(pubsub.MaskFromMaskable(args.EventName)) > 0 {
globalHTTPListen.Publish(args.ToEvent(false))
}
globalEventNotifier.Send(args)
}
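// Illustrative usage sketch (editor's addition, not part of the upstream code):
// a handler typically fills eventArgs from the request context and the object
// layer's ObjectInfo, then calls sendEvent. The values below are placeholders;
// r is assumed to be the *http.Request and objInfo the returned ObjectInfo.
//
//	args := eventArgs{
//		EventName:  event.ObjectCreatedPut,
//		BucketName: bucket,
//		Object:     objInfo,
//		ReqParams:  map[string]string{"region": globalSite.Region},
//		Host:       r.Host,
//		UserAgent:  r.UserAgent(),
//	}
//	sendEvent(args)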
<file_sep>#!/bin/bash -e
set -E
set -o pipefail
set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=("$PWD/minio.RELEASE.2020-10-28T08-16-50Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function download_old_release() {
if [ ! -f minio.RELEASE.2020-10-28T08-16-50Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2020-10-28T08-16-50Z
chmod a+x minio.RELEASE.2020-10-28T08-16-50Z
fi
}
function verify_rewrite() {
start_port=$1
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MC_HOST_minio="http://minio:[email protected]:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" mb minio/healing-rewrite-bucket --quiet --with-lock
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
"${WORK_DIR}/mc" cp \
buildscripts/verify-build.sh \
minio/healing-rewrite-bucket/ \
--disable-multipart --quiet
kill ${pid}
sleep 3
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" admin inspect minio/healing-rewrite-bucket/verify-build.sh/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
kill ${pid}
}
function main() {
download_old_release
start_port=$(shuf -i 10000-65000 -n 1)
verify_rewrite ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// # This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"syscall"
"time"
"github.com/minio/pkg/certs"
"github.com/rs/dnscache"
)
// tlsClientSessionCacheSize is the cache size for client sessions.
var tlsClientSessionCacheSize = 100
// ConnSettings - contains connection settings.
type ConnSettings struct {
// If this is non-nil, DNSCache and DialTimeout are ignored.
DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Dial settings, used if DialContext is nil.
DNSCache *dnscache.Resolver
DialTimeout time.Duration
// TLS Settings
RootCAs *x509.CertPool
CipherSuites []uint16
CurvePreferences []tls.CurveID
// HTTP2
EnableHTTP2 bool
// TCP Options
TCPOptions TCPOptions
}
func (s ConnSettings) getDefaultTransport() *http.Transport {
dialContext := s.DialContext
if dialContext == nil {
dialContext = DialContextWithDNSCache(s.DNSCache, NewInternodeDialContext(s.DialTimeout, s.TCPOptions))
}
tlsClientConfig := tls.Config{
RootCAs: s.RootCAs,
CipherSuites: s.CipherSuites,
CurvePreferences: s.CurvePreferences,
ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize),
}
// For more details about various values used here refer
// https://golang.org/pkg/net/http/#Transport documentation
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: dialContext,
MaxIdleConnsPerHost: 1024,
WriteBufferSize: 32 << 10, // 32KiB moving up from 4KiB default
ReadBufferSize: 32 << 10, // 32KiB moving up from 4KiB default
IdleConnTimeout: 15 * time.Second,
ResponseHeaderTimeout: 15 * time.Minute, // Conservative timeout is the default (for MinIO internode)
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 10 * time.Second,
TLSClientConfig: &tlsClientConfig,
ForceAttemptHTTP2: s.EnableHTTP2,
// Go net/http automatically unzip if content-type is
// gzip disable this feature, as we are always interested
// in raw stream.
DisableCompression: true,
}
// https://github.com/golang/go/issues/23559
// https://github.com/golang/go/issues/42534
// https://github.com/golang/go/issues/43989
// https://github.com/golang/go/issues/33425
// https://github.com/golang/go/issues/29246
// if tlsConfig != nil {
// trhttp2, _ := http2.ConfigureTransports(tr)
// if trhttp2 != nil {
// // ReadIdleTimeout is the timeout after which a health check using ping
// // frame will be carried out if no frame is received on the
// // connection. 5 minutes is sufficient time for any idle connection.
// trhttp2.ReadIdleTimeout = 5 * time.Minute
// // PingTimeout is the timeout after which the connection will be closed
// // if a response to Ping is not received.
// trhttp2.PingTimeout = dialTimeout
// // DisableCompression, if true, prevents the Transport from
// // requesting compression with an "Accept-Encoding: gzip"
// trhttp2.DisableCompression = true
// }
// }
return tr
}
// NewInternodeHTTPTransport returns transport for internode MinIO connections.
func (s ConnSettings) NewInternodeHTTPTransport() func() http.RoundTripper {
tr := s.getDefaultTransport()
// Settings specific to internode requests.
tr.TLSHandshakeTimeout = 15 * time.Second
tr.ExpectContinueTimeout = 15 * time.Second
return func() http.RoundTripper {
return tr
}
}
// NewCustomHTTPProxyTransport is used only for proxied requests; it
// specifically supports only HTTP/1.1.
func (s ConnSettings) NewCustomHTTPProxyTransport() func() *http.Transport {
s.EnableHTTP2 = false
tr := s.getDefaultTransport()
// Settings specific to proxied requests.
tr.ResponseHeaderTimeout = 30 * time.Minute
return func() *http.Transport {
return tr
}
}
// NewHTTPTransportWithTimeout allows setting a timeout for response headers
func (s ConnSettings) NewHTTPTransportWithTimeout(timeout time.Duration) *http.Transport {
tr := s.getDefaultTransport()
// Settings specific to this transport.
tr.ResponseHeaderTimeout = timeout
return tr
}
// NewHTTPTransportWithClientCerts returns a new http configuration used for
// communicating with client cert authentication.
func (s ConnSettings) NewHTTPTransportWithClientCerts(ctx context.Context, clientCert, clientKey string) (*http.Transport, error) {
transport := s.NewHTTPTransportWithTimeout(1 * time.Minute)
if clientCert != "" && clientKey != "" {
c, err := certs.NewManager(ctx, clientCert, clientKey, tls.LoadX509KeyPair)
if err != nil {
return nil, err
}
if c != nil {
c.UpdateReloadDuration(10 * time.Second)
c.ReloadOnSignal(syscall.SIGHUP) // allow reloads upon SIGHUP
transport.TLSClientConfig.GetClientCertificate = c.GetClientCertificate
}
}
return transport, nil
}
// NewRemoteTargetHTTPTransport returns a new http configuration
// used while communicating with the remote replication targets.
func (s ConnSettings) NewRemoteTargetHTTPTransport() func() *http.Transport {
tr := s.getDefaultTransport()
tr.TLSHandshakeTimeout = 5 * time.Second
tr.ExpectContinueTimeout = 5 * time.Second
tr.ResponseHeaderTimeout = 0
return func() *http.Transport {
return tr
}
}
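// Illustrative usage sketch (editor's addition, not part of the upstream code):
// building an internode transport from ConnSettings. rootCAs is assumed to be a
// previously loaded *x509.CertPool; pass nil to fall back to the system roots.
//
//	settings := ConnSettings{
//		DialTimeout: 15 * time.Second,
//		RootCAs:     rootCAs,
//		EnableHTTP2: false,
//		TCPOptions:  TCPOptions{},
//	}
//	newTransport := settings.NewInternodeHTTPTransport()
//	client := &http.Client{Transport: newTransport()}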
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"bytes"
"encoding/binary"
"flag"
"fmt"
"hash/crc32"
"log"
"os"
"strings"
"github.com/dchest/siphash"
"github.com/google/uuid"
)
// hashes the key returning an integer based on the input algorithm.
// This function currently supports
// - SIPMOD
func sipHashMod(key string, cardinality int, id [16]byte) int {
if cardinality <= 0 {
return -1
}
// use the faster version as per siphash docs
// https://github.com/dchest/siphash#usage
k0, k1 := binary.LittleEndian.Uint64(id[0:8]), binary.LittleEndian.Uint64(id[8:16])
sum64 := siphash.Hash(k0, k1, []byte(key))
return int(sum64 % uint64(cardinality))
}
// hashOrder - hashes input key to return consistent
// hashed integer slice. Returned integer order is salted
// with an input key. This results in consistent order.
// NOTE: collisions are fine, we are not looking for uniqueness
// in the slices returned.
func hashOrder(key string, cardinality int) []int {
if cardinality <= 0 {
// Return nil for cardinality <= 0.
return nil
}
nums := make([]int, cardinality)
keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
start := int(keyCrc % uint32(cardinality))
for i := 1; i <= cardinality; i++ {
nums[i-1] = 1 + ((start + i) % cardinality)
}
return nums
}
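// Illustrative usage sketch (editor's addition, not part of the upstream code):
// computing the erasure set and the hashed disk order for an object. The
// deployment ID below is a made-up value; a real one comes from format.json.
//
//	id := uuid.MustParse("c9f2f3f4-9c9d-4f54-9b7a-123456789abc")
//	set := sipHashMod("prefix/object.txt", 16, id) // erasure set index in [0, 16)
//	order := hashOrder("prefix/object.txt", 16)    // per-set distribution, values in [1, 16]
//	_ = set
//	_ = order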
var (
file, object, deploymentID, prefix string
setCount, shards int
verbose bool
)
func main() {
flag.StringVar(&file, "file", "", "Read all objects from file, newline separated")
flag.StringVar(&prefix, "prefix", "", "Add prefix to all objects")
flag.StringVar(&object, "object", "", "Select an object")
flag.StringVar(&deploymentID, "deployment-id", "", "MinIO deployment ID, obtained from 'format.json'")
flag.IntVar(&setCount, "set-count", 0, "Total set count")
flag.IntVar(&shards, "shards", 0, "Total shards count")
flag.BoolVar(&verbose, "v", false, "Display all objects")
flag.Parse()
if deploymentID == "" {
log.Fatalln("deployment ID is mandatory")
}
if setCount == 0 {
log.Fatalln("set count cannot be zero")
}
id := uuid.MustParse(deploymentID)
if file != "" {
distrib := make([][]string, setCount)
b, err := os.ReadFile(file)
if err != nil {
log.Fatalln(err)
}
b = bytes.ReplaceAll(b, []byte("\r"), []byte{})
sc := bufio.NewScanner(bytes.NewBuffer(b))
for sc.Scan() {
object = strings.TrimSpace(sc.Text())
set := sipHashMod(prefix+object, setCount, id)
distrib[set] = append(distrib[set], prefix+object)
}
for set, files := range distrib {
fmt.Println("Set:", set+1, "Objects:", len(files))
if !verbose {
continue
}
for _, s := range files {
fmt.Printf("\t%s\n", s)
}
}
os.Exit(0)
}
if object == "" {
log.Fatalln("object name is mandatory")
}
if shards != 0 {
fmt.Println("Erasure distribution for the object", hashOrder(prefix+object, shards))
}
fmt.Println("Erasure setNumber for the object", sipHashMod(prefix+object, setCount, id)+1)
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package openid
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"github.com/minio/minio/internal/arn"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/identity/openid/provider"
xhttp "github.com/minio/minio/internal/http"
xnet "github.com/minio/pkg/net"
)
type providerCfg struct {
// Used for user interface like console
DisplayName string
JWKS struct {
URL *xnet.URL
}
URL *xnet.URL
ClaimPrefix string
ClaimName string
ClaimUserinfo bool
RedirectURI string
RedirectURIDynamic bool
DiscoveryDoc DiscoveryDoc
ClientID string
ClientSecret string
RolePolicy string
roleArn arn.ARN
provider provider.Provider
}
func newProviderCfgFromConfig(getCfgVal func(cfgName string) string) providerCfg {
return providerCfg{
DisplayName: getCfgVal(DisplayName),
ClaimName: getCfgVal(ClaimName),
ClaimUserinfo: getCfgVal(ClaimUserinfo) == config.EnableOn,
ClaimPrefix: getCfgVal(ClaimPrefix),
RedirectURI: getCfgVal(RedirectURI),
RedirectURIDynamic: getCfgVal(RedirectURIDynamic) == config.EnableOn,
ClientID: getCfgVal(ClientID),
ClientSecret: getCfgVal(ClientSecret),
RolePolicy: getCfgVal(RolePolicy),
}
}
const (
keyCloakVendor = "keycloak"
)
// initializeProvider initializes the provider if any additional vendor-specific
// information was provided; initialization returns an error if the initial login fails.
func (p *providerCfg) initializeProvider(cfgGet func(string) string, transport http.RoundTripper) error {
vendor := cfgGet(Vendor)
if vendor == "" {
return nil
}
var err error
switch vendor {
case keyCloakVendor:
adminURL := cfgGet(KeyCloakAdminURL)
realm := cfgGet(KeyCloakRealm)
p.provider, err = provider.KeyCloak(
provider.WithAdminURL(adminURL),
provider.WithOpenIDConfig(provider.DiscoveryDoc(p.DiscoveryDoc)),
provider.WithTransport(transport),
provider.WithRealm(realm),
)
return err
default:
return fmt.Errorf("Unsupport vendor %s", keyCloakVendor)
}
}
// GetRoleArn returns the role ARN.
func (p *providerCfg) GetRoleArn() string {
if p.RolePolicy == "" {
return ""
}
return p.roleArn.String()
}
// UserInfo returns claims for authenticated user from userInfo endpoint.
//
// Some OIDC implementations such as GitLab do not support
// claims as part of the normal oauth2 flow, instead rely
// on service providers making calls to IDP to fetch additional
// claims available from the UserInfo endpoint
func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]interface{}, error) {
if p.JWKS.URL == nil || p.JWKS.URL.String() == "" {
return nil, errors.New("openid not configured")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, p.DiscoveryDoc.UserInfoEndpoint, nil)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if accessToken != "" {
req.Header.Set("Authorization", "Bearer "+accessToken)
}
client := &http.Client{
Transport: transport,
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
// uncomment this for debugging when needed.
// reqBytes, _ := httputil.DumpRequest(req, false)
// fmt.Println(string(reqBytes))
// respBytes, _ := httputil.DumpResponse(resp, true)
// fmt.Println(string(respBytes))
return nil, errors.New(resp.Status)
}
claims := map[string]interface{}{}
if err = json.NewDecoder(resp.Body).Decode(&claims); err != nil {
// uncomment this for debugging when needed.
// reqBytes, _ := httputil.DumpRequest(req, false)
// fmt.Println(string(reqBytes))
// respBytes, _ := httputil.DumpResponse(resp, true)
// fmt.Println(string(respBytes))
return nil, err
}
return claims, nil
}
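// Illustrative usage sketch (editor's addition, not part of the upstream code):
// fetching extra claims from the provider's UserInfo endpoint after the OAuth2
// flow. pCfg, ctx, accessToken and transport are assumed to be in scope.
//
//	claims, err := pCfg.UserInfo(ctx, accessToken, transport)
//	if err != nil {
//		// handle the error: the endpoint is unreachable or returned a non-200 status
//	}
//	email, _ := claims["email"].(string) // claim availability depends on the IDP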
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package dsync
import (
"bytes"
"context"
"errors"
"net/http"
"net/url"
"time"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/rest"
)
// ReconnectRESTClient is a wrapper type for rest.Client which provides reconnect on first failure.
type ReconnectRESTClient struct {
u *url.URL
rest *rest.Client
}
// newClient constructs a ReconnectRESTClient object with addr and endpoint initialized.
// It _doesn't_ connect to the remote endpoint. See the Call method for when the
// connection is established.
func newClient(endpoint string) NetLocker {
u, err := url.Parse(endpoint)
if err != nil {
panic(err)
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConnsPerHost: 1024,
WriteBufferSize: 32 << 10, // 32KiB moving up from 4KiB default
ReadBufferSize: 32 << 10, // 32KiB moving up from 4KiB default
IdleConnTimeout: 15 * time.Second,
ResponseHeaderTimeout: 15 * time.Minute, // Set conservative timeouts for MinIO internode.
TLSHandshakeTimeout: 15 * time.Second,
ExpectContinueTimeout: 15 * time.Second,
// Go net/http automatically unzip if content-type is
// gzip disable this feature, as we are always interested
// in raw stream.
DisableCompression: true,
}
return &ReconnectRESTClient{
u: u,
rest: rest.NewClient(u, tr, nil),
}
}
// IsOnline returns whether the client is considered online.
func (restClient *ReconnectRESTClient) IsOnline() bool {
// The client is online as long as the underlying rest client is initialized.
return restClient.rest != nil
}
func (restClient *ReconnectRESTClient) IsLocal() bool {
return false
}
// Close closes the underlying socket file descriptor.
func (restClient *ReconnectRESTClient) Close() error {
return nil
}
var (
errLockConflict = errors.New("lock conflict")
errLockNotFound = errors.New("lock not found")
)
func toLockError(err error) error {
if err == nil {
return nil
}
switch err.Error() {
case errLockConflict.Error():
return errLockConflict
case errLockNotFound.Error():
return errLockNotFound
}
return err
}
// Call makes a REST call to the remote endpoint using the msgp codec
func (restClient *ReconnectRESTClient) Call(method string, args LockArgs) (status bool, err error) {
buf, err := args.MarshalMsg(nil)
if err != nil {
return false, err
}
body := bytes.NewReader(buf)
respBody, err := restClient.rest.Call(context.Background(), method,
url.Values{}, body, body.Size())
defer xhttp.DrainBody(respBody)
switch toLockError(err) {
case nil:
return true, nil
case errLockConflict, errLockNotFound:
return false, nil
default:
return false, err
}
}
func (restClient *ReconnectRESTClient) RLock(ctx context.Context, args LockArgs) (status bool, err error) {
return restClient.Call("/v1/rlock", args)
}
func (restClient *ReconnectRESTClient) Lock(ctx context.Context, args LockArgs) (status bool, err error) {
return restClient.Call("/v1/lock", args)
}
func (restClient *ReconnectRESTClient) RUnlock(ctx context.Context, args LockArgs) (status bool, err error) {
return restClient.Call("/v1/runlock", args)
}
func (restClient *ReconnectRESTClient) Unlock(ctx context.Context, args LockArgs) (status bool, err error) {
return restClient.Call("/v1/unlock", args)
}
func (restClient *ReconnectRESTClient) Refresh(ctx context.Context, args LockArgs) (refreshed bool, err error) {
return restClient.Call("/v1/refresh", args)
}
func (restClient *ReconnectRESTClient) ForceUnlock(ctx context.Context, args LockArgs) (reply bool, err error) {
return restClient.Call("/v1/force-unlock", args)
}
func (restClient *ReconnectRESTClient) String() string {
return restClient.u.String()
}
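// Illustrative usage sketch (editor's addition, not part of the upstream code):
// acquiring and releasing a write lock through the REST client. The endpoint is
// a placeholder and args is a prepared LockArgs for the resource being locked.
//
//	locker := newClient("http://server1:9000/minio/lock")
//	locked, err := locker.Lock(context.Background(), args)
//	if err == nil && locked {
//		defer locker.Unlock(context.Background(), args)
//		// ... critical section ...
//	}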
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"net/url"
"path"
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/amztime"
"github.com/minio/minio/internal/bucket/bandwidth"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/workers"
"github.com/zeebo/xxh3"
)
const (
throttleDeadline = 1 * time.Hour
// ReplicationReset has reset id and timestamp of last reset operation
ReplicationReset = "replication-reset"
// ReplicationStatus has internal replication status - stringified representation of target's replication status for all replication
// activity initiated from this cluster
ReplicationStatus = "replication-status"
// ReplicationTimestamp - the last time replication was initiated on this cluster for this object version
ReplicationTimestamp = "replication-timestamp"
// ReplicaStatus - this header is present if a replica was received by this cluster for this object version
ReplicaStatus = "replica-status"
// ReplicaTimestamp - the last time a replica was received by this cluster for this object version
ReplicaTimestamp = "replica-timestamp"
// TaggingTimestamp - the last time a tag metadata modification happened on this cluster for this object version
TaggingTimestamp = "tagging-timestamp"
// ObjectLockRetentionTimestamp - the last time a object lock metadata modification happened on this cluster for this object version
ObjectLockRetentionTimestamp = "objectlock-retention-timestamp"
// ObjectLockLegalHoldTimestamp - the last time a legal hold metadata modification happened on this cluster for this object version
ObjectLockLegalHoldTimestamp = "objectlock-legalhold-timestamp"
// ReplicationWorkerMultiplier is suggested worker multiplier if traffic exceeds replication worker capacity
ReplicationWorkerMultiplier = 1.5
)
func isReplicationEnabled(ctx context.Context, bucketName string) bool {
rc, _ := getReplicationConfig(ctx, bucketName)
return rc != nil
}
// gets the replication config associated with a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
if err != nil {
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) {
return rCfg, err
}
logger.CriticalIf(ctx, err)
}
return rCfg, err
}
// validateReplicationDestination returns an error if the replication destination bucket is missing
// or not configured. It also returns true if the replication destination is the same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, checkRemote bool) (bool, APIError) {
var arns []string
if rCfg.RoleArn != "" {
arns = append(arns, rCfg.RoleArn)
} else {
for _, rule := range rCfg.Rules {
arns = append(arns, rule.Destination.String())
}
}
var sameTarget bool
for _, arnStr := range arns {
arn, err := madmin.ParseARN(arnStr)
if err != nil {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrBucketRemoteArnInvalid, err)
}
if arn.Type != madmin.ReplicationService {
return sameTarget, toAPIError(ctx, BucketRemoteArnTypeInvalid{Bucket: bucket})
}
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arnStr)
if clnt == nil {
return sameTarget, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
}
if checkRemote { // validate remote bucket
found, err := clnt.BucketExists(ctx, arn.Bucket)
if err != nil {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, err)
}
if !found {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, BucketRemoteTargetNotFound{Bucket: arn.Bucket})
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, arn.Bucket)
if err != nil {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrReplicationDestinationMissingLock, err)
}
if lock != objectlock.Enabled {
return sameTarget, errorCodes.ToAPIErrWithErr(ErrReplicationDestinationMissingLock, nil)
}
}
}
}
// validate replication ARN against target endpoint
c := globalBucketTargetSys.GetRemoteTargetClient(ctx, arnStr)
if c != nil {
if err := checkRemoteEndpoint(ctx, c.EndpointURL()); err != nil {
switch err.(type) {
case BucketRemoteIdenticalToSource:
return true, errorCodes.ToAPIErrWithErr(ErrBucketRemoteIdenticalToSource, fmt.Errorf("remote target endpoint %s is self referential", c.EndpointURL().String()))
default:
}
}
if c.EndpointURL().String() == clnt.EndpointURL().String() {
selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
if !sameTarget {
sameTarget = selfTarget
}
continue
}
}
}
if len(arns) == 0 {
return false, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket})
}
return sameTarget, toAPIError(ctx, nil)
}
// performs an HTTP request to the remote endpoint to check whether the deployment id of the remote
// endpoint is the same as the local cluster's deployment id. This is to prevent replication to self,
// especially when a load balancer sits in front of MinIO.
func checkRemoteEndpoint(ctx context.Context, epURL *url.URL) error {
reqURL := &url.URL{
Scheme: epURL.Scheme,
Host: epURL.Host,
Path: healthCheckPathPrefix + healthCheckReadinessPath,
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL.String(), nil)
if err != nil {
return err
}
client := &http.Client{
Transport: NewHTTPTransport(),
Timeout: 10 * time.Second,
}
resp, err := client.Do(req)
if err != nil {
return err
}
// Drain the connection.
xhttp.DrainBody(resp.Body)
if resp != nil {
amzid := resp.Header.Get(xhttp.AmzRequestHostID)
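// If the host id returned by the remote readiness endpoint matches one of this cluster's own
// node names, the "remote" endpoint is actually this cluster (e.g. reached via a load balancer).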
if _, ok := globalNodeNamesHex[amzid]; ok {
return BucketRemoteIdenticalToSource{
Endpoint: epURL.String(),
}
}
}
return nil
}
type mustReplicateOptions struct {
meta map[string]string
status replication.StatusType
opType replication.Type
replicationRequest bool // incoming request is a replication request
}
func (o mustReplicateOptions) ReplicationStatus() (s replication.StatusType) {
if rs, ok := o.meta[xhttp.AmzBucketReplicationStatus]; ok {
return replication.StatusType(rs)
}
return s
}
func (o mustReplicateOptions) isExistingObjectReplication() bool {
return o.opType == replication.ExistingObjectReplicationType
}
func (o mustReplicateOptions) isMetadataReplication() bool {
return o.opType == replication.MetadataReplicationType
}
func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
if !op.Valid() {
op = replication.ObjectReplicationType
if o.metadataOnly {
op = replication.MetadataReplicationType
}
}
meta := cloneMSS(o.UserDefined)
if o.UserTags != "" {
meta[xhttp.AmzObjectTagging] = o.UserTags
}
return mustReplicateOptions{
meta: meta,
status: o.ReplicationStatus,
opType: op,
replicationRequest: opts.ReplicationRequest,
}
}
// mustReplicate returns a ReplicateDecision indicating, for each matching target, whether the
// object meets the replication criteria and whether replication is to be done synchronously.
func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) {
// object layer not initialized we return with no decision.
if newObjectLayerFn() == nil {
return
}
// Disable server-side replication on object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if !globalBucketVersioningSys.PrefixEnabled(bucket, object) {
return
}
replStatus := mopts.ReplicationStatus()
if replStatus == replication.Replica && !mopts.isMetadataReplication() {
return
}
if mopts.replicationRequest { // incoming replication request on target cluster
return
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(mopts.meta),
Replica: replStatus == replication.Replica,
ExistingObject: mopts.isExistingObjectReplication(),
}
tagStr, ok := mopts.meta[xhttp.AmzObjectTagging]
if ok {
opts.UserTags = tagStr
}
tgtArns := cfg.FilterTargetArns(opts)
for _, tgtArn := range tgtArns {
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgtArn)
// the target online status should not be used here while deciding
// whether to replicate as the target could be temporarily down
opts.TargetArn = tgtArn
replicate := cfg.Replicate(opts)
var synchronous bool
if tgt != nil {
synchronous = tgt.replicateSync
}
dsc.Set(newReplicateTargetDecision(tgtArn, replicate, synchronous))
}
return dsc
}
// Standard headers that need to be extracted from User metadata.
var standardHeaders = []string{
xhttp.ContentType,
xhttp.CacheControl,
xhttp.ContentEncoding,
xhttp.ContentLanguage,
xhttp.ContentDisposition,
xhttp.AmzStorageClass,
xhttp.AmzObjectTagging,
xhttp.AmzBucketReplicationStatus,
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockRetainUntilDate,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzTagCount,
xhttp.AmzServerSideEncryption,
}
// returns true if any of the objects being deleted qualifies for replication.
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
c, err := getReplicationConfig(ctx, bucket)
if err != nil || c == nil {
return false
}
for _, obj := range objects {
if c.HasActiveRules(obj.ObjectName, true) {
return true
}
}
return false
}
// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(matchHeaderKey string) bool {
return equals(matchHeaderKey, standardHeaders...)
}
// checkReplicateDelete returns the per-target replication decision for a delete, i.e. whether the
// deleted object version or its delete marker qualifies for replication.
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
return
}
// If incoming request is a replication request, it does not need to be re-replicated.
if delOpts.ReplicationRequest {
return
}
// Skip replication if this object's prefix is excluded from being
// versioned.
if !delOpts.Versioned {
return
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: oi.DeleteMarker,
VersionID: dobj.VersionID,
OpType: replication.DeleteReplicationType,
}
tgtArns := rcfg.FilterTargetArns(opts)
if len(tgtArns) > 0 {
dsc.targetsMap = make(map[string]replicateTargetDecision, len(tgtArns))
var sync, replicate bool
for _, tgtArn := range tgtArns {
opts.TargetArn = tgtArn
replicate = rcfg.Replicate(opts)
// when incoming delete is removal of a delete marker(a.k.a versioned delete),
// GetObjectInfo returns extra information even though it returns errFileNotFound
if gerr != nil {
validReplStatus := false
switch oi.TargetReplicationStatus(tgtArn) {
case replication.Pending, replication.Completed, replication.Failed:
validReplStatus = true
}
if oi.DeleteMarker && (validReplStatus || replicate) {
dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
continue
} else {
// can be the case that other cluster is down and duplicate `mc rm --vid`
// is issued - this still needs to be replicated back to the other target
replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
continue
}
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgtArn)
// the target online status should not be used here while deciding
// whether to replicate deletes as the target could be temporarily down
tgtDsc := newReplicateTargetDecision(tgtArn, false, false)
if tgt != nil {
tgtDsc = newReplicateTargetDecision(tgtArn, replicate, tgt.replicateSync)
}
dsc.Set(tgtDsc)
}
}
return dsc
}
// replicate deletes to the designated replication target if replication configuration
// has delete marker replication or delete replication (MinIO extension to allow deletes where version id
// is specified) enabled.
// Similar to bucket replication for PUT operation, soft delete (a.k.a setting delete marker) and
// permanent deletes (by specifying a version ID in the delete operation) have three states "Pending", "Complete"
// and "Failed" to mark the status of the replication of "DELETE" operation. All failed operations can
// then be retried by healing. In the case of permanent deletes, until the replication is completed on the
// target cluster, the object version is marked deleted on the source and hidden from listing. It is permanently
// deleted from the source when the VersionPurgeStatus changes to "Complete", i.e. after replication succeeds
// on target.
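// Illustrative (not exhaustive) flow for a versioned delete: the version is first marked with
// VersionPurgeStatus "Pending" on the source, replicateDeleteToTarget attempts the remote delete
// and moves it to "Complete" or "Failed", and only once it is "Complete" may the version be
// permanently removed from the source.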
func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, objectAPI ObjectLayer) {
var replicationStatus replication.StatusType
bucket := dobj.Bucket
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
defer func() {
replStatus := string(replicationStatus)
auditLogInternal(context.Background(), AuditLogOptions{
Event: dobj.EventType,
APIName: ReplicateDeleteAPI,
Bucket: bucket,
Object: dobj.ObjectName,
VersionID: versionID,
Status: replStatus,
})
}()
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: event.ObjectReplicationNotTracked,
})
return
}
dsc, err := parseReplicateDecision(dobj.ReplicationState.ReplicateDecisionStr)
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: event.ObjectReplicationNotTracked,
})
return
}
// Lock the object name before starting replication operation.
// Use separate lock that doesn't collide with regular objects.
lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
globalReplicationPool.queueMRFSave(dobj.ToMRFEntry())
logger.LogIf(ctx, fmt.Errorf("failed to get lock for object: %s bucket:%s arn:%s", dobj.ObjectName, bucket, dobj.TargetArn))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: event.ObjectReplicationNotTracked,
})
return
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx)
var wg sync.WaitGroup
var rinfos replicatedInfos
rinfos.Targets = make([]replicatedTargetInfo, len(dsc.targetsMap))
idx := -1
for tgtArn := range dsc.targetsMap {
idx++
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgtArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: event.ObjectReplicationNotTracked,
})
continue
}
if tgt := dsc.targetsMap[tgtArn]; !tgt.Replicate {
continue
}
// if dobj.TargetArn is not empty string, this is a case of specific target being re-synced.
if dobj.TargetArn != "" && dobj.TargetArn != tgt.ARN {
continue
}
wg.Add(1)
go func(index int, tgt *TargetClient) {
defer wg.Done()
rinfo := replicateDeleteToTarget(ctx, dobj, tgt)
rinfos.Targets[index] = rinfo
}(idx, tgt)
}
wg.Wait()
replicationStatus = rinfos.ReplicationStatus()
prevStatus := dobj.DeleteMarkerReplicationStatus()
if dobj.VersionID != "" {
prevStatus = replication.StatusType(dobj.VersionPurgeStatus())
replicationStatus = replication.StatusType(rinfos.VersionPurgeStatus())
}
// to decrement pending count later.
for _, rinfo := range rinfos.Targets {
if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
globalReplicationStats.Update(dobj.Bucket, rinfo.Arn, 0, 0, replicationStatus,
prevStatus, replication.DeleteReplicationType)
}
}
eventName := event.ObjectReplicationComplete
if replicationStatus == replication.Failed {
eventName = event.ObjectReplicationFailed
globalReplicationPool.queueMRFSave(dobj.ToMRFEntry())
}
drs := getReplicationState(rinfos, dobj.ReplicationState, dobj.VersionID)
if replicationStatus != prevStatus {
drs.ReplicationTimeStamp = UTCNow()
}
dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
VersionID: versionID,
MTime: dobj.DeleteMarkerMTime.Time,
DeleteReplication: drs,
Versioned: globalBucketVersioningSys.PrefixEnabled(bucket, dobj.ObjectName),
// Objects matching prefixes should not leave delete markers,
// dramatically reduces namespace pollution while keeping the
// benefits of replication, make sure to apply version suspension
// only at bucket level instead.
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})
if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: eventName,
})
} else {
sendEvent(eventArgs{
BucketName: bucket,
Object: dobjInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: eventName,
})
}
}
func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationInfo, tgt *TargetClient) (rinfo replicatedTargetInfo) {
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rinfo = dobj.ReplicationState.targetState(tgt.ARN)
rinfo.OpType = dobj.OpType
defer func() {
if rinfo.ReplicationStatus == replication.Completed && tgt.ResetID != "" && dobj.OpType == replication.ExistingObjectReplicationType {
rinfo.ResyncTimestamp = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), tgt.ResetID)
}
}()
if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType {
rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
return
}
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == Complete {
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete"+tgt.ARN)
sendEvent(eventArgs{
BucketName: dobj.Bucket,
Object: ObjectInfo{
Bucket: dobj.Bucket,
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
},
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
EventName: event.ObjectReplicationNotTracked,
})
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Failed
} else {
rinfo.VersionPurgeStatus = Failed
}
return
}
// Early return if the delete marker has already been replicated, for existing-object replication / healing of delete markers.
if dobj.DeleteMarkerVersionID != "" {
toi, err := tgt.StatObject(ctx, tgt.Bucket, dobj.ObjectName, minio.StatObjectOptions{
VersionID: versionID,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
IsReplicationReadyForDeleteMarker: true,
},
})
if isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Completed
return
}
}
if !isErrObjectNotFound(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
// mark delete marker replication as failed if target cluster not ready to receive
// this request yet (object version not replicated yet)
if err != nil && !toi.ReplicationReady {
rinfo.ReplicationStatus = replication.Failed
return
}
}
}
rmErr := tgt.RemoveObject(ctx, tgt.Bucket, dobj.ObjectName, minio.RemoveObjectOptions{
VersionID: versionID,
Internal: minio.AdvancedRemoveOptions{
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
ReplicationStatus: minio.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
if rmErr != nil {
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Failed
} else {
rinfo.VersionPurgeStatus = Failed
}
logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %s", tgt.Bucket, dobj.ObjectName, versionID, rmErr))
} else {
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Completed
} else {
rinfo.VersionPurgeStatus = Complete
}
}
return
}
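// getCopyObjMetadata builds the metadata forwarded on the metadata-only CopyObject path during
// replication: user metadata minus internal and replication-status keys, content-type/encoding,
// tags (with a REPLACE directive), the source ETag and mtime, and a Replica replication status.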
func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
if equals(k, xhttp.AmzBucketReplicationStatus) {
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
meta[k] = v
}
if oi.ContentEncoding != "" {
meta[xhttp.ContentEncoding] = oi.ContentEncoding
}
if oi.ContentType != "" {
meta[xhttp.ContentType] = oi.ContentType
}
if oi.UserTags != "" {
meta[xhttp.AmzObjectTagging] = oi.UserTags
meta[xhttp.AmzTagDirective] = "REPLACE"
}
if sc == "" {
sc = oi.StorageClass
}
// drop non-standard storage classes (used for tiering) from replication
if sc != "" && (sc == storageclass.RRS || sc == storageclass.STANDARD) {
meta[xhttp.AmzStorageClass] = sc
}
meta[xhttp.MinIOSourceETag] = oi.ETag
meta[xhttp.MinIOSourceMTime] = oi.ModTime.UTC().Format(time.RFC3339Nano)
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
return meta
}
type caseInsensitiveMap map[string]string
// Lookup map entry case insensitively.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
if len(m) == 0 {
return "", false
}
for _, k := range []string{
key,
strings.ToLower(key),
http.CanonicalHeaderKey(key),
} {
v, ok := m[k]
if ok {
return v, ok
}
}
return "", false
}
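// putReplicationOpts builds the PutObjectOptions used when replicating an object to a remote
// target: user metadata minus internal and standard headers, tags with their tagging timestamp,
// object-lock mode/retention/legal-hold with their originating timestamps, and SSE-S3 when the
// source object is encrypted.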
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
if isStandardHeader(k) {
continue
}
meta[k] = v
}
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
sc = objInfo.StorageClass
}
putOpts = minio.PutObjectOptions{
UserMetadata: meta,
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: minio.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
}
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
// set tag timestamp in opts
tagTimestamp := objInfo.ModTime
if tagTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok {
tagTimestamp, err = time.Parse(time.RFC3339Nano, tagTmstampStr)
if err != nil {
return putOpts, err
}
}
putOpts.Internal.TaggingTimestamp = tagTimestamp
}
}
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := minio.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := amztime.ISO8601Parse(retainDateStr)
if err != nil {
return putOpts, err
}
putOpts.RetainUntilDate = rdate
// set retention timestamp in opts
retTimestamp := objInfo.ModTime
if retainTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok {
retTimestamp, err = time.Parse(time.RFC3339Nano, retainTmstampStr)
if err != nil {
return putOpts, err
}
}
putOpts.Internal.RetentionTimestamp = retTimestamp
}
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = minio.LegalHoldStatus(lhold)
// set legalhold timestamp in opts
lholdTimestamp := objInfo.ModTime
if lholdTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
lholdTimestamp, err = time.Parse(time.RFC3339Nano, lholdTmstampStr)
if err != nil {
return putOpts, err
}
}
putOpts.Internal.LegalholdTimestamp = lholdTimestamp
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
type replicationAction string
const (
replicateMetadata replicationAction = "metadata"
replicateNone replicationAction = "none"
replicateAll replicationAction = "all"
)
// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
for _, k2 := range keys {
if strings.EqualFold(k1, k2) {
return true
}
}
return false
}
// returns replicationAction by comparing metadata between source and target
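// replicateAll is returned when content-identifying fields differ (ETag, version id, size,
// delete-marker state or mtime); differences only in content-type/encoding, tags or the compared
// headers yield replicateMetadata, and a fully matching target copy yields replicateNone.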
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replication.Type) replicationAction {
// Avoid resyncing null versions created prior to enabling replication if target has a newer copy
if opType == replication.ExistingObjectReplicationType &&
oi1.ModTime.Unix() > oi2.LastModified.Unix() && oi1.VersionID == nullVersionID {
return replicateNone
}
sz, _ := oi1.GetActualSize()
// needs full replication
if oi1.ETag != oi2.ETag ||
oi1.VersionID != oi2.VersionID ||
sz != oi2.Size ||
oi1.DeleteMarker != oi2.IsDeleteMarker ||
oi1.ModTime.Unix() != oi2.LastModified.Unix() {
return replicateAll
}
if oi1.ContentType != oi2.ContentType {
return replicateMetadata
}
if oi1.ContentEncoding != "" {
enc, ok := oi2.Metadata[xhttp.ContentEncoding]
if !ok {
enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
if !ok {
return replicateMetadata
}
}
if strings.Join(enc, ",") != oi1.ContentEncoding {
return replicateMetadata
}
}
t, _ := tags.ParseObjectTags(oi1.UserTags)
if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) || (oi2.UserTagCount != len(t.ToMap())) {
return replicateMetadata
}
// Compare only necessary headers
compareKeys := []string{
"Expires",
"Cache-Control",
"Content-Language",
"Content-Disposition",
"X-Amz-Object-Lock-Mode",
"X-Amz-Object-Lock-Retain-Until-Date",
"X-Amz-Object-Lock-Legal-Hold",
"X-Amz-Website-Redirect-Location",
"X-Amz-Meta-",
}
// compare metadata on both maps to see if meta is identical
compareMeta1 := make(map[string]string)
for k, v := range oi1.UserDefined {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta1[strings.ToLower(k)] = v
}
}
compareMeta2 := make(map[string]string)
for k, v := range oi2.Metadata {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
}
}
if !reflect.DeepEqual(compareMeta1, compareMeta2) {
return replicateMetadata
}
return replicateNone
}
// replicateObject replicates the specified version of the object to the destination bucket.
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI ObjectLayer) {
var replicationStatus replication.StatusType
defer func() {
if replicationStatus.Empty() {
// replication status is empty means
// replication was not attempted for some
// reason, notify the state of the object
// on disk.
replicationStatus = ri.ReplicationStatus
}
auditLogInternal(ctx, AuditLogOptions{
Event: ri.EventType,
APIName: ReplicateObjectAPI,
Bucket: ri.Bucket,
Object: ri.Name,
VersionID: ri.VersionID,
Status: replicationStatus.String(),
})
}()
objInfo := ri.ObjectInfo
bucket := objInfo.Bucket
object := objInfo.Name
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogOnceIf(ctx, err, "get-replication-config"+bucket)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
tgtArns := cfg.FilterTargetArns(replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(objInfo.UserDefined),
UserTags: objInfo.UserTags,
})
// Lock the object name before starting replication.
// Use separate lock that doesn't collide with regular objects.
lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+object)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
logger.LogIf(ctx, fmt.Errorf("failed to get lock for object: %s bucket:%s arn:%s", object, bucket, ri.TargetArn))
return
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx)
var wg sync.WaitGroup
var rinfos replicatedInfos
rinfos.Targets = make([]replicatedTargetInfo, len(tgtArns))
for i, tgtArn := range tgtArns {
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgtArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
continue
}
wg.Add(1)
go func(index int, tgt *TargetClient) {
defer wg.Done()
if ri.OpType == replication.ObjectReplicationType {
// all incoming calls go through optimized path.
rinfos.Targets[index] = ri.replicateObject(ctx, objectAPI, tgt)
} else {
rinfos.Targets[index] = ri.replicateAll(ctx, objectAPI, tgt)
}
}(i, tgt)
}
wg.Wait()
replicationStatus = rinfos.ReplicationStatus() // used in defer function
// FIXME: add support for missing replication events
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
eventName := event.ObjectReplicationComplete
if replicationStatus == replication.Failed {
eventName = event.ObjectReplicationFailed
}
newReplStatusInternal := rinfos.ReplicationStatusInternal()
// Note that internal replication status(es) may match for previously replicated objects - in such cases
// metadata should be updated with last resync timestamp.
if objInfo.ReplicationStatusInternal != newReplStatusInternal || rinfos.ReplicationResynced() {
popts := ObjectOptions{
MTime: objInfo.ModTime,
VersionID: objInfo.VersionID,
EvalMetadataFn: func(oi *ObjectInfo) error {
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
oi.UserDefined[xhttp.AmzBucketReplicationStatus] = string(rinfos.ReplicationStatus())
for _, rinfo := range rinfos.Targets {
if rinfo.ResyncTimestamp != "" {
oi.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
}
}
if objInfo.UserTags != "" {
oi.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
return nil
},
}
_, _ = objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
opType := replication.MetadataReplicationType
if rinfos.Action() == replicateAll {
opType = replication.ObjectReplicationType
}
for _, rinfo := range rinfos.Targets {
if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus {
globalReplicationStats.Update(bucket, rinfo.Arn, rinfo.Size, rinfo.Duration, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus, opType)
}
}
}
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
// re-queue failures once more - keep a retry count to avoid flooding the queue if
// the target site is down. Leave it to the scanner to catch up instead.
if rinfos.ReplicationStatus() != replication.Completed {
ri.OpType = replication.HealReplicationType
ri.EventType = ReplicateMRF
ri.ReplicationStatusInternal = rinfos.ReplicationStatusInternal()
ri.RetryCount++
globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
}
}
// replicateObject replicates object data for the specified version of the object to the destination
// bucket. The source object is then updated to reflect the replication status.
func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
startTime := time.Now()
objInfo := ri.ObjectInfo.Clone()
bucket := objInfo.Bucket
object := objInfo.Name
sz, _ := objInfo.GetActualSize()
rAction := replicateAll
rinfo = replicatedTargetInfo{
Size: sz,
Arn: tgt.ARN,
PrevReplicationStatus: objInfo.TargetReplicationStatus(tgt.ARN),
ReplicationStatus: replication.Failed,
OpType: ri.OpType,
ReplicationAction: rAction,
}
if ri.ObjectInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-obj"+tgt.ARN)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
VersionID: objInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
})
if err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
logger.LogIf(ctx, fmt.Errorf("unable to update replicate metadata for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
}
return
}
defer gr.Close()
objInfo = gr.ObjInfo
// make sure we have the latest metadata for metrics calculation
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
}
defer func() {
if rinfo.ReplicationStatus == replication.Completed && ri.OpType == replication.ExistingObjectReplicationType && tgt.ResetID != "" {
rinfo.ResyncTimestamp = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), tgt.ResetID)
rinfo.ReplicationResynced = true
}
rinfo.Duration = time.Since(startTime)
}()
rinfo.ReplicationStatus = replication.Completed
rinfo.Size = size
rinfo.ReplicationAction = rAction
// use core client to avoid doing multipart on PUT
c := &minio.Core{Client: tgt.Client}
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
opts := &bandwidth.MonitorReaderOptions{
Bucket: objInfo.Bucket,
TargetARN: tgt.ARN,
HeaderSize: headerSize,
}
newCtx := ctx
if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
var cancel context.CancelFunc
newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
defer cancel()
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if err := replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); err != nil {
if minio.ToErrorResponse(err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
}
}
} else {
if _, err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); err != nil {
if minio.ToErrorResponse(err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
}
}
}
return
}
// replicateAll replicates metadata for the specified version of the object to the destination bucket;
// if the destination version is missing, it automatically does a full copy as well.
// The source object is then updated to reflect the replication status.
func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
startTime := time.Now()
objInfo := ri.ObjectInfo.Clone()
bucket := objInfo.Bucket
object := objInfo.Name
sz, _ := objInfo.GetActualSize()
// set defaults for replication action based on operation being performed - actual
// replication action can only be determined after stat on remote. This default is
// needed for updating replication metrics correctly when target is offline.
rAction := replicateMetadata
rinfo = replicatedTargetInfo{
Size: sz,
Arn: tgt.ARN,
PrevReplicationStatus: objInfo.TargetReplicationStatus(tgt.ARN),
ReplicationStatus: replication.Failed,
OpType: ri.OpType,
ReplicationAction: rAction,
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-all"+tgt.ARN)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
VersionID: objInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
})
if err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
logger.LogIf(ctx, fmt.Errorf("unable to update replicate metadata for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
}
return
}
defer gr.Close()
objInfo = gr.ObjInfo
// make sure we have the latest metadata for metrics calculation
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
// use latest ObjectInfo to check if previous replication attempt succeeded
if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return
}
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
}
defer func() {
if rinfo.ReplicationStatus == replication.Completed && ri.OpType == replication.ExistingObjectReplicationType && tgt.ResetID != "" {
rinfo.ResyncTimestamp = fmt.Sprintf("%s;%s", UTCNow().Format(http.TimeFormat), tgt.ResetID)
rinfo.ReplicationResynced = true
}
rinfo.Duration = time.Since(startTime)
}()
rAction = replicateAll
oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, minio.StatObjectOptions{
VersionID: objInfo.VersionID,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
if cerr == nil {
rAction = getReplicationAction(objInfo, oi, ri.OpType)
rinfo.ReplicationStatus = replication.Completed
if rAction == replicateNone {
if ri.OpType == replication.ExistingObjectReplicationType &&
objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID {
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target", bucket, object))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
}
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed
if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Pending || objInfo.TargetReplicationStatus(tgt.ARN) == replication.Failed || ri.OpType == replication.ExistingObjectReplicationType {
// if metadata is not updated for some reason after replication, such as
// 503 encountered while updating metadata - make sure to set ReplicationStatus
// as Completed.
//
// Note: Replication Stats would have been updated despite metadata update failure.
rinfo.ReplicationAction = rAction
rinfo.ReplicationStatus = replication.Completed
}
return
}
}
// if the target returned an error other than NoSuchKey/NoSuchVersion/SlowDownRead, defer the replication attempt
if cerr != nil {
errResp := minio.ToErrorResponse(cerr)
switch errResp.Code {
case "NoSuchKey", "NoSuchVersion", "SlowDownRead":
rAction = replicateAll
default:
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
}
rinfo.ReplicationStatus = replication.Completed
rinfo.Size = size
rinfo.ReplicationAction = rAction
// use core client to avoid doing multipart on PUT
c := &minio.Core{Client: tgt.Client}
if rAction != replicateAll {
// replicate metadata for object tagging/copy with metadata replacement
srcOpts := minio.CopySrcOptions{
Bucket: tgt.Bucket,
Object: object,
VersionID: objInfo.VersionID,
}
dstOpts := minio.PutObjectOptions{
Internal: minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
}
if _, err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); err != nil {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
}
} else {
var putOpts minio.PutObjectOptions
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
}
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
opts := &bandwidth.MonitorReaderOptions{
Bucket: objInfo.Bucket,
TargetARN: tgt.ARN,
HeaderSize: headerSize,
}
newCtx := ctx
if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
var cancel context.CancelFunc
newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
defer cancel()
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if err := replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); err != nil {
if minio.ToErrorResponse(err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
} else {
if _, err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); err != nil {
if minio.ToErrorResponse(err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
}
}
return
}
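// replicateObjectWithMultipart mirrors the source object's part layout on the target: it starts a
// new multipart upload, uploads each source part from r, and completes the upload with the source
// mtime; on failure it retries aborting the remote upload up to three times to avoid leaving
// stale parts behind.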
func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) {
var uploadedParts []minio.CompletePart
// new multipart must not set mtime as it may lead to erroneous cleanups at various intervals.
opts.Internal.SourceMTime = time.Time{} // this value is saved properly in CompleteMultipartUpload()
uploadID, err := c.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil {
return err
}
defer func() {
if err != nil {
// block and abort remote upload upon failure.
attempts := 1
for attempts <= 3 {
aerr := c.AbortMultipartUpload(ctx, bucket, object, uploadID)
if aerr == nil {
return
}
logger.LogIf(ctx,
fmt.Errorf("Trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
attempts++
time.Sleep(time.Second)
}
}
}()
var (
hr *hash.Reader
pInfo minio.ObjectPart
)
for _, partInfo := range objInfo.Parts {
hr, err = hash.NewReader(io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
if err != nil {
return err
}
popts := minio.PutObjectPartOptions{
SSE: opts.ServerSideEncryption,
}
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
if err != nil {
return err
}
if pInfo.Size != partInfo.ActualSize {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
}
uploadedParts = append(uploadedParts, minio.CompletePart{
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
})
}
_, err = c.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
Internal: minio.AdvancedPutOptions{
SourceMTime: objInfo.ModTime,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
},
})
return err
}
// filterReplicationStatusMetadata filters replication status metadata for COPY
func filterReplicationStatusMetadata(metadata map[string]string) map[string]string {
// Copy on write - only allocate a new map if a key actually has to be removed.
dst := metadata
var copied bool
delKey := func(key string) {
if _, ok := metadata[key]; !ok {
return
}
if !copied {
dst = make(map[string]string, len(metadata))
for k, v := range metadata {
dst[k] = v
}
copied = true
}
delete(dst, key)
}
delKey(xhttp.AmzBucketReplicationStatus)
return dst
}
// DeletedObjectReplicationInfo has info on deleted object
type DeletedObjectReplicationInfo struct {
DeletedObject
Bucket string
EventType string
OpType replication.Type
ResetID string
TargetArn string
}
// ToMRFEntry returns the relevant info needed by MRF
func (di DeletedObjectReplicationInfo) ToMRFEntry() MRFReplicateEntry {
versionID := di.DeleteMarkerVersionID
if versionID == "" {
versionID = di.VersionID
}
return MRFReplicateEntry{
Bucket: di.Bucket,
Object: di.ObjectName,
versionID: versionID,
}
}
// Replication specific APIName
const (
ReplicateObjectAPI = "ReplicateObject"
ReplicateDeleteAPI = "ReplicateDelete"
)
const (
// ReplicateQueued - replication being queued trail
ReplicateQueued = "replicate:queue"
// ReplicateExisting - audit trail for existing objects replication
ReplicateExisting = "replicate:existing"
// ReplicateExistingDelete - audit trail for delete replication triggered for existing delete markers
ReplicateExistingDelete = "replicate:existing:delete"
// ReplicateMRF - audit trail for replication from Most Recent Failures (MRF) queue
ReplicateMRF = "replicate:mrf"
// ReplicateIncoming - audit trail of inline replication
ReplicateIncoming = "replicate:incoming"
// ReplicateIncomingDelete - audit trail of inline replication of deletes.
ReplicateIncomingDelete = "replicate:incoming:delete"
// ReplicateHeal - audit trail for healing of failed/pending replications
ReplicateHeal = "replicate:heal"
// ReplicateHealDelete - audit trail of healing of failed/pending delete replications.
ReplicateHealDelete = "replicate:heal:delete"
)
var (
globalReplicationPool *ReplicationPool
globalReplicationStats *ReplicationStats
)
// ReplicationPool describes replication pool
type ReplicationPool struct {
// atomic ops:
activeWorkers int32
activeMRFWorkers int32
objLayer ObjectLayer
ctx context.Context
priority string
mu sync.RWMutex
resyncer *replicationResyncer
// workers:
workers []chan ReplicationWorkerOperation
existingWorkers chan ReplicationWorkerOperation
// mrf:
mrfWorkerKillCh chan struct{}
mrfReplicaCh chan ReplicationWorkerOperation
mrfSaveCh chan MRFReplicateEntry
mrfStopCh chan struct{}
mrfWorkerSize int
saveStateCh chan struct{}
}
// ReplicationWorkerOperation is a shared interface of replication operations.
type ReplicationWorkerOperation interface {
ToMRFEntry() MRFReplicateEntry
}
const (
// WorkerMaxLimit max number of workers per node for "fast" mode
WorkerMaxLimit = 500
// WorkerMinLimit min number of workers per node for "slow" mode
WorkerMinLimit = 50
// WorkerAutoDefault is default number of workers for "auto" mode
WorkerAutoDefault = 100
// MRFWorkerMaxLimit max number of mrf workers per node for "fast" mode
MRFWorkerMaxLimit = 8
// MRFWorkerMinLimit min number of mrf workers per node for "slow" mode
MRFWorkerMinLimit = 2
// MRFWorkerAutoDefault is default number of mrf workers for "auto" mode
MRFWorkerAutoDefault = 4
)
// NewReplicationPool creates a pool of replication workers of specified size
func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts) *ReplicationPool {
var workers, failedWorkers int
priority := "auto"
if opts.Priority != "" {
priority = opts.Priority
}
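// Map the configured priority to initial worker pool sizes; "auto" starts at the defaults and can
// grow on demand (see queueReplicaTask / queueReplicaDeleteTask).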
switch priority {
case "fast":
workers = WorkerMaxLimit
failedWorkers = MRFWorkerMaxLimit
case "slow":
workers = WorkerMinLimit
failedWorkers = MRFWorkerMinLimit
default:
workers = WorkerAutoDefault
failedWorkers = MRFWorkerAutoDefault
}
pool := &ReplicationPool{
workers: make([]chan ReplicationWorkerOperation, 0, workers),
existingWorkers: make(chan ReplicationWorkerOperation, 100000),
mrfReplicaCh: make(chan ReplicationWorkerOperation, 100000),
mrfWorkerKillCh: make(chan struct{}, failedWorkers),
resyncer: newresyncer(),
mrfSaveCh: make(chan MRFReplicateEntry, 100000),
mrfStopCh: make(chan struct{}, 1),
saveStateCh: make(chan struct{}, 1),
ctx: ctx,
objLayer: o,
priority: priority,
}
pool.ResizeWorkers(workers, 0)
pool.ResizeFailedWorkers(failedWorkers)
go pool.AddWorker(pool.existingWorkers, nil)
go pool.resyncer.PersistToDisk(ctx, o)
go pool.processMRF()
go pool.persistMRF()
go pool.saveStatsToDisk()
return pool
}
// AddMRFWorker adds a pending/failed replication worker to handle requests that could not be queued
// to the other workers
func (p *ReplicationPool) AddMRFWorker() {
for {
select {
case <-p.ctx.Done():
return
case oi, ok := <-p.mrfReplicaCh:
if !ok {
return
}
switch v := oi.(type) {
case ReplicateObjectInfo:
atomic.AddInt32(&p.activeMRFWorkers, 1)
replicateObject(p.ctx, v, p.objLayer)
atomic.AddInt32(&p.activeMRFWorkers, -1)
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
}
case <-p.mrfWorkerKillCh:
return
}
}
}
// AddWorker adds a replication worker to the pool.
// An optional pointer to a tracker that will be atomically
// incremented when operations are running can be provided.
func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opTracker *int32) {
for {
select {
case <-p.ctx.Done():
return
case oi, ok := <-input:
if !ok {
return
}
switch v := oi.(type) {
case ReplicateObjectInfo:
if opTracker != nil {
atomic.AddInt32(opTracker, 1)
}
replicateObject(p.ctx, v, p.objLayer)
if opTracker != nil {
atomic.AddInt32(opTracker, -1)
}
case DeletedObjectReplicationInfo:
if opTracker != nil {
atomic.AddInt32(opTracker, 1)
}
replicateDelete(p.ctx, v, p.objLayer)
if opTracker != nil {
atomic.AddInt32(opTracker, -1)
}
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
}
}
}
// ActiveWorkers returns the number of active workers handling replication traffic.
func (p *ReplicationPool) ActiveWorkers() int {
return int(atomic.LoadInt32(&p.activeWorkers))
}
// ActiveMRFWorkers returns the number of active workers handling replication failures.
func (p *ReplicationPool) ActiveMRFWorkers() int {
return int(atomic.LoadInt32(&p.activeMRFWorkers))
}
// ResizeWorkers sets the replication workers pool to a new size.
// checkOld can be set to an expected current size; if the worker count changed from that value
// while waiting for the lock, the resize is skipped.
func (p *ReplicationPool) ResizeWorkers(n, checkOld int) {
p.mu.Lock()
defer p.mu.Unlock()
if (checkOld > 0 && len(p.workers) != checkOld) || n == len(p.workers) || n < 1 {
// Either already satisfied or worker count changed while we waited for the lock.
return
}
for len(p.workers) < n {
input := make(chan ReplicationWorkerOperation, 10000)
p.workers = append(p.workers, input)
go p.AddWorker(input, &p.activeWorkers)
}
for len(p.workers) > n {
worker := p.workers[len(p.workers)-1]
p.workers = p.workers[:len(p.workers)-1]
close(worker)
}
}
// ResizeWorkerPriority resizes the replication worker and MRF worker pools according to the given priority.
func (p *ReplicationPool) ResizeWorkerPriority(pri string) {
var workers, mrfWorkers int
p.mu.Lock()
switch pri {
case "fast":
workers = WorkerMaxLimit
mrfWorkers = MRFWorkerMaxLimit
case "slow":
workers = WorkerMinLimit
mrfWorkers = MRFWorkerMinLimit
default:
workers = WorkerAutoDefault
mrfWorkers = MRFWorkerAutoDefault
if len(p.workers) < WorkerAutoDefault {
workers = int(math.Min(float64(len(p.workers)+1), WorkerAutoDefault))
}
if p.mrfWorkerSize < MRFWorkerAutoDefault {
mrfWorkers = int(math.Min(float64(p.mrfWorkerSize+1), MRFWorkerAutoDefault))
}
}
p.priority = pri
p.mu.Unlock()
p.ResizeWorkers(workers, 0)
p.ResizeFailedWorkers(mrfWorkers)
}
// ResizeFailedWorkers sets replication failed workers pool size
func (p *ReplicationPool) ResizeFailedWorkers(n int) {
p.mu.Lock()
defer p.mu.Unlock()
for p.mrfWorkerSize < n {
p.mrfWorkerSize++
go p.AddMRFWorker()
}
for p.mrfWorkerSize > n {
p.mrfWorkerSize--
go func() { p.mrfWorkerKillCh <- struct{}{} }()
}
}
// getWorkerCh gets a worker channel deterministically based on bucket and object names.
// Must be able to grab read lock from p.
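// The deterministic hash means operations for the same bucket/object pair are funneled to the
// same worker channel, which keeps replication of a given object serialized (presumably the intent).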
func (p *ReplicationPool) getWorkerCh(bucket, object string) chan<- ReplicationWorkerOperation {
h := xxh3.HashString(bucket + object)
p.mu.RLock()
defer p.mu.RUnlock()
if len(p.workers) == 0 {
return nil
}
return p.workers[h%uint64(len(p.workers))]
}
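// queueReplicaTask schedules an object replication operation on the appropriate worker channel;
// when the channels are saturated the entry is saved to the MRF queue and, under "auto" priority,
// the worker pools are grown towards their limits.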
func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
if p == nil {
return
}
var ch, healCh chan<- ReplicationWorkerOperation
switch ri.OpType {
case replication.ExistingObjectReplicationType:
ch = p.existingWorkers
case replication.HealReplicationType:
ch = p.mrfReplicaCh
healCh = p.getWorkerCh(ri.Name, ri.Bucket)
default:
ch = p.getWorkerCh(ri.Name, ri.Bucket)
}
if ch == nil && healCh == nil {
return
}
select {
case <-p.ctx.Done():
case healCh <- ri:
case ch <- ri:
default:
globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
p.mu.RLock()
prio := p.priority
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem))
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
default:
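// "auto" priority: grow the worker pools one worker at a time, up to the hard limits, while the
// queues stay saturated.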
if p.ActiveWorkers() < WorkerMaxLimit {
p.mu.RLock()
workers := int(math.Min(float64(len(p.workers)+1), WorkerMaxLimit))
existing := len(p.workers)
p.mu.RUnlock()
p.ResizeWorkers(workers, existing)
}
if p.ActiveMRFWorkers() < MRFWorkerMaxLimit {
p.mu.RLock()
workers := int(math.Min(float64(p.mrfWorkerSize+1), MRFWorkerMaxLimit))
p.mu.RUnlock()
p.ResizeFailedWorkers(workers)
}
}
}
}
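// queueReplicateDeletesWrapper queues one delete-replication task per target that still needs to
// be resynced, stamping the reset id and target ARN on each copy.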
func queueReplicateDeletesWrapper(doi DeletedObjectReplicationInfo, existingObjectResync ResyncDecision) {
for k, v := range existingObjectResync.targets {
if v.Replicate {
doi.ResetID = v.ResetID
doi.TargetArn = k
globalReplicationPool.queueReplicaDeleteTask(doi)
}
}
}
func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInfo) {
if p == nil {
return
}
var ch chan<- ReplicationWorkerOperation
switch doi.OpType {
case replication.ExistingObjectReplicationType:
ch = p.existingWorkers
case replication.HealReplicationType:
fallthrough
default:
ch = p.getWorkerCh(doi.Bucket, doi.ObjectName)
}
select {
case <-p.ctx.Done():
case ch <- doi:
default:
globalReplicationPool.queueMRFSave(doi.ToMRFEntry())
p.mu.RLock()
prio := p.priority
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem))
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
default:
if p.ActiveWorkers() < WorkerMaxLimit {
p.mu.RLock()
workers := int(math.Min(float64(len(p.workers)+1), WorkerMaxLimit))
existing := len(p.workers)
p.mu.RUnlock()
p.ResizeWorkers(workers, existing)
}
}
}
}
type replicationPoolOpts struct {
Priority string
}
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
globalReplicationPool = NewReplicationPool(ctx, objectAPI, replicationPoolOpts{
Priority: globalAPIConfig.getReplicationPriority(),
})
globalReplicationStats = NewReplicationStats(ctx, objectAPI)
go globalReplicationStats.loadInitialReplicationMetrics(ctx)
}
type proxyResult struct {
Proxy bool
Err error
}
// get Reader from replication target if active-active replication is in place and
// this node returns a 404
func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, _ http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
if !proxy.Proxy {
return nil, proxy, nil
}
fn, _, _, err := NewGetObjectReader(nil, oi, opts)
if err != nil {
return nil, proxy, err
}
gopts := minio.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
PartNumber: opts.PartNumber,
}
// get correct offsets for encrypted object
if rs != nil {
h, err := rs.ToHeader()
if err != nil {
return nil, proxy, err
}
gopts.Set(xhttp.Range, h)
}
// Make sure to match ETag when proxying.
if err = gopts.SetMatchETag(oi.ETag); err != nil {
return nil, proxy, err
}
c := minio.Core{Client: tgt.Client}
obj, _, h, err := c.GetObject(ctx, tgt.Bucket, object, gopts)
if err != nil {
return nil, proxy, err
}
closeReader := func() { obj.Close() }
reader, err := fn(obj, h, closeReader)
if err != nil {
return nil, proxy, err
}
reader.ObjInfo = oi.Clone()
if rs != nil {
contentSize, err := parseSizeFromContentRange(h)
if err != nil {
return nil, proxy, err
}
reader.ObjInfo.Size = contentSize
}
return reader, proxyResult{Proxy: true}, nil
}
func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOptions) (tgts *madmin.BucketTargets) {
if opts.VersionSuspended {
return &madmin.BucketTargets{}
}
if opts.ProxyRequest || (opts.ProxyHeaderSet && !opts.ProxyRequest) {
return &madmin.BucketTargets{}
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil || cfg == nil {
return &madmin.BucketTargets{}
}
topts := replication.ObjectOpts{Name: object}
tgtArns := cfg.FilterTargetArns(topts)
tgts = &madmin.BucketTargets{Targets: make([]madmin.BucketTarget, len(tgtArns))}
for i, tgtArn := range tgtArns {
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArn)
tgts.Targets[i] = tgt
}
return tgts
}
func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (tgt *TargetClient, oi ObjectInfo, proxy proxyResult) {
// this option is set when active-active replication is in place between site A -> B,
// and site B does not have the object yet.
if opts.ProxyRequest || (opts.ProxyHeaderSet && !opts.ProxyRequest) { // true only when site B sets MinIOSourceProxyRequest header
return nil, oi, proxy
}
for _, t := range proxyTargets.Targets {
tgt = globalBucketTargetSys.GetRemoteTargetClient(ctx, t.Arn)
if tgt == nil || globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
continue
}
// if proxying explicitly disabled on remote target
if tgt.disableProxy {
continue
}
gopts := minio.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
PartNumber: opts.PartNumber,
}
if rs != nil {
h, err := rs.ToHeader()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
continue
}
gopts.Set(xhttp.Range, h)
}
objInfo, err := tgt.StatObject(ctx, t.TargetBucket, object, gopts)
if err != nil {
if isErrInvalidRange(ErrorRespToObjectError(err, bucket, object)) {
return nil, oi, proxyResult{Err: err}
}
continue
}
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi = ObjectInfo{
Bucket: bucket,
Name: object,
ModTime: objInfo.LastModified,
Size: objInfo.Size,
ETag: objInfo.ETag,
VersionID: objInfo.VersionID,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.IsDeleteMarker,
ContentType: objInfo.ContentType,
Expires: objInfo.Expires,
StorageClass: objInfo.StorageClass,
ReplicationStatusInternal: objInfo.ReplicationStatus,
UserTags: tags.String(),
ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
}
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
for k, v := range objInfo.Metadata {
oi.UserDefined[k] = v[0]
}
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
if !ok {
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
}
if ok {
oi.ContentEncoding = ce
}
return tgt, oi, proxyResult{Proxy: true}
}
return nil, oi, proxy
}
// get object info from replication target if active-active replication is in place and
// this node returns a 404
func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (oi ObjectInfo, proxy proxyResult) {
_, oi, proxy = proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
return oi, proxy
}
func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, dsc ReplicateDecision, opType replication.Type) {
ri := ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType, Dsc: dsc, EventType: ReplicateIncoming}
if dsc.Synchronous() {
replicateObject(ctx, ri, o)
} else {
globalReplicationPool.queueReplicaTask(ri)
}
if sz, err := objInfo.GetActualSize(); err == nil {
for arn := range dsc.targetsMap {
globalReplicationStats.Update(objInfo.Bucket, arn, sz, 0, objInfo.ReplicationStatus, replication.StatusType(""), opType)
}
}
}
func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectReplicationInfo, o ObjectLayer) {
globalReplicationPool.queueReplicaDeleteTask(dv)
for arn := range dv.ReplicationState.Targets {
globalReplicationStats.Update(dv.Bucket, arn, 0, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
}
for arn := range dv.ReplicationState.PurgeTargets {
globalReplicationStats.Update(dv.Bucket, arn, 0, 0, replication.Pending, replication.StatusType(""), replication.DeleteReplicationType)
}
}
type replicationConfig struct {
Config *replication.Config
remotes *madmin.BucketTargets
}
func (c replicationConfig) Empty() bool {
return c.Config == nil
}
func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
return c.Config.Replicate(opts)
}
// Resync returns a ResyncDecision for targets that need resync - either because a replication reset was requested or existing object replication applies
func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
if c.Empty() {
return
}
// Now overlay existing object replication choices for target
if oi.DeleteMarker {
opts := replication.ObjectOpts{
Name: oi.Name,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: oi.DeleteMarker,
VersionID: oi.VersionID,
OpType: replication.DeleteReplicationType,
ExistingObject: true,
}
tgtArns := c.Config.FilterTargetArns(opts)
// indicates no matching target with Existing object replication enabled.
if len(tgtArns) == 0 {
return
}
for _, t := range tgtArns {
opts.TargetArn = t
// Update replication decision for target based on existing object replication rule.
dsc.Set(newReplicateTargetDecision(t, c.Replicate(opts), false))
}
return c.resync(oi, dsc, tgtStatuses)
}
// Ignore previous replication status when deciding if object can be re-replicated
objInfo := oi.Clone()
objInfo.ReplicationStatusInternal = ""
objInfo.VersionPurgeStatusInternal = ""
objInfo.ReplicationStatus = ""
objInfo.VersionPurgeStatus = ""
delete(objInfo.UserDefined, xhttp.AmzBucketReplicationStatus)
resyncdsc := mustReplicate(ctx, oi.Bucket, oi.Name, getMustReplicateOptions(objInfo, replication.ExistingObjectReplicationType, ObjectOptions{}))
dsc = &resyncdsc
return c.resync(oi, dsc, tgtStatuses)
}
// wrapper function for testability. Returns true if a new reset is requested on
// already replicated objects OR object qualifies for existing object replication
// and no reset requested.
func (c replicationConfig) resync(oi ObjectInfo, dsc *ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
r = ResyncDecision{
targets: make(map[string]ResyncTargetDecision),
}
if c.remotes == nil {
return
}
for _, tgt := range c.remotes.Targets {
d, ok := dsc.targetsMap[tgt.Arn]
if !ok {
continue
}
if !d.Replicate {
continue
}
r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn])
}
return
}
func targetResetHeader(arn string) string {
return fmt.Sprintf("%s-%s", ReservedMetadataPrefixLower+ReplicationReset, arn)
}
func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate time.Time, tgtStatus replication.StatusType) (rd ResyncTargetDecision) {
rd = ResyncTargetDecision{
ResetID: resetID,
ResetBeforeDate: resetBeforeDate,
}
rs, ok := oi.UserDefined[targetResetHeader(arn)]
if !ok {
rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] // for backward compatibility
}
if !ok { // existing object replication is enabled and object version is unreplicated so far.
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
rd.Replicate = true
return
}
// For existing object replication without a prior reset, replicate only if the target has not replicated this version yet.
rd.Replicate = tgtStatus == ""
return
}
if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress
return
}
// if already replicated, return true if a new reset was requested.
splits := strings.SplitN(rs, ";", 2)
if len(splits) != 2 {
return
}
newReset := splits[1] != resetID
if !newReset && tgtStatus == replication.Completed {
// already replicated and no reset requested
return
}
rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate)
return
}
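// Illustrative note (not part of the original code): the per-target reset
// marker read above is expected to hold two ";"-separated fields with the
// reset ID in the second field, for example (hypothetical value):
//
//	oi.UserDefined[targetResetHeader(arn)] = "2023-01-02T15:04:05Z;4a5b6c7d"
//
// splits[1] is then "4a5b6c7d"; replication is re-queued only when the
// incoming resetID differs from it and the object was modified before
// resetBeforeDate.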
const resyncTimeInterval = time.Minute * 1
// PersistToDisk persists in-memory resync metadata stats to disk at periodic intervals
func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI ObjectLayer) {
resyncTimer := time.NewTimer(resyncTimeInterval)
defer resyncTimer.Stop()
// For each bucket name, store the last timestamp of the
// successful save of replication status in the backend disks.
lastResyncStatusSave := make(map[string]time.Time)
for {
select {
case <-resyncTimer.C:
s.RLock()
for bucket, brs := range s.statusMap {
var updt bool
// Save the replication status if one resync to any bucket target is still not finished
for _, st := range brs.TargetsMap {
if st.LastUpdate.Equal(timeSentinel) {
updt = true
break
}
}
// Save the replication status if a new stats update is found and not saved in the backend yet
if brs.LastUpdate.After(lastResyncStatusSave[bucket]) {
updt = true
}
if updt {
if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
logger.LogIf(ctx, fmt.Errorf("Could not save resync metadata to drive for %s - %w", bucket, err))
} else {
lastResyncStatusSave[bucket] = brs.LastUpdate
}
}
}
s.RUnlock()
resyncTimer.Reset(resyncTimeInterval)
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
}
}
}
const (
resyncWorkerCnt = 10 // limit on the number of bucket resyncs in progress at any given time
resyncParallelRoutines = 10 // number of parallel resync ops per bucket
)
func newresyncer() *replicationResyncer {
rs := replicationResyncer{
statusMap: make(map[string]BucketReplicationResyncStatus),
workerSize: resyncWorkerCnt,
resyncCancelCh: make(chan struct{}, resyncWorkerCnt),
workerCh: make(chan struct{}, resyncWorkerCnt),
}
for i := 0; i < rs.workerSize; i++ {
rs.workerCh <- struct{}{}
}
return &rs
}
// mark status of replication resync on remote target for the bucket
func (s *replicationResyncer) markStatus(status ResyncStatusType, opts resyncOpts) {
s.Lock()
defer s.Unlock()
m := s.statusMap[opts.bucket]
st := m.TargetsMap[opts.arn]
st.LastUpdate = UTCNow()
st.ResyncStatus = status
m.TargetsMap[opts.arn] = st
m.LastUpdate = UTCNow()
s.statusMap[opts.bucket] = m
}
// update replication resync stats for bucket's remote target
func (s *replicationResyncer) incStats(ts TargetReplicationResyncStatus, opts resyncOpts) {
s.Lock()
defer s.Unlock()
m := s.statusMap[opts.bucket]
st := m.TargetsMap[opts.arn]
st.Object = ts.Object
st.ReplicatedCount += ts.ReplicatedCount
st.FailedCount += ts.FailedCount
st.ReplicatedSize += ts.ReplicatedSize
st.FailedSize += ts.FailedSize
m.TargetsMap[opts.arn] = st
m.LastUpdate = UTCNow()
s.statusMap[opts.bucket] = m
}
// resyncBucket resyncs all qualifying objects as per replication rules for the target
// ARN
func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI ObjectLayer, heal bool, opts resyncOpts) {
select {
case <-s.workerCh: // block till a worker is available
case <-ctx.Done():
return
}
resyncStatus := ResyncFailed
defer func() {
s.markStatus(resyncStatus, opts)
globalSiteResyncMetrics.incBucket(opts, resyncStatus)
s.workerCh <- struct{}{}
}()
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
cfg, err := getReplicationConfig(ctx, opts.bucket)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
return
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
return
}
rcfg := replicationConfig{
Config: cfg,
remotes: tgts,
}
tgtArns := cfg.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: opts.arn,
})
if len(tgtArns) != 1 {
logger.LogIf(ctx, fmt.Errorf("Replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, opts.arn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("Replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
return
}
// mark resync status as resync started
if !heal {
s.markStatus(ResyncStarted, opts)
}
// Walk through all object versions - Walk() always lists in ascending order, which is
// needed to ensure a delete marker is replicated to the target only after the object version it refers to.
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, ObjectOptions{}); err != nil {
logger.LogIf(ctx, err)
return
}
s.RLock()
m := s.statusMap[opts.bucket]
st := m.TargetsMap[opts.arn]
s.RUnlock()
var lastCheckpoint string
if st.ResyncStatus == ResyncStarted || st.ResyncStatus == ResyncFailed {
lastCheckpoint = st.Object
}
workers, err := workers.New(resyncParallelRoutines)
if err != nil {
logger.LogIf(ctx, err)
return
}
for obj := range objInfoCh {
select {
case <-s.resyncCancelCh:
resyncStatus = ResyncCanceled
return
case <-ctx.Done():
return
default:
}
if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
continue
}
lastCheckpoint = ""
obj := obj
workers.Take()
go func() {
defer workers.Give()
roi := getHealReplicateObjectInfo(obj, rcfg)
if !roi.ExistingObjResync.mustResync() {
return
}
traceFn := s.trace(tgt.ResetID, fmt.Sprintf("%s/%s (%s)", opts.bucket, roi.Name, roi.VersionID))
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
versionID := ""
dmVersionID := ""
if roi.VersionPurgeStatus.Empty() {
dmVersionID = roi.VersionID
} else {
versionID = roi.VersionID
}
doi := DeletedObjectReplicationInfo{
DeletedObject: DeletedObject{
ObjectName: roi.Name,
DeleteMarkerVersionID: dmVersionID,
VersionID: versionID,
ReplicationState: roi.getReplicationState(roi.Dsc.String(), versionID, true),
DeleteMarkerMTime: DeleteMarkerMTime{roi.ModTime},
DeleteMarker: roi.DeleteMarker,
},
Bucket: roi.Bucket,
OpType: replication.ExistingObjectReplicationType,
EventType: ReplicateExistingDelete,
}
replicateDelete(ctx, doi, objectAPI)
} else {
roi.OpType = replication.ExistingObjectReplicationType
roi.EventType = ReplicateExisting
replicateObject(ctx, roi, objectAPI)
}
_, err := tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{ // goroutine-local err avoids racing on the captured variable
VersionID: roi.VersionID,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
st := TargetReplicationResyncStatus{
Object: roi.Name,
Bucket: roi.Bucket,
}
success := true
if err != nil {
if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) {
st.ReplicatedCount++
} else {
st.FailedCount++
success = false
}
} else {
st.ReplicatedCount++
st.ReplicatedSize += roi.Size
}
s.incStats(st, opts)
traceFn(err)
globalSiteResyncMetrics.updateMetric(roi, success, opts.resyncID)
}()
}
workers.Wait()
resyncStatus = ResyncCompleted
}
// start replication resync for the remote target ARN specified
func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opts resyncOpts) error {
if opts.bucket == "" {
return fmt.Errorf("bucket name is empty")
}
if opts.arn == "" {
return fmt.Errorf("target ARN specified for resync is empty")
}
// Ensure a replication configuration exists for this bucket before starting the resync.
cfg, err := getReplicationConfig(ctx, opts.bucket)
if err != nil {
return err
}
tgtArns := cfg.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: opts.arn,
})
if len(tgtArns) == 0 {
return fmt.Errorf("arn %s specified for resync not found in replication config", opts.arn)
}
globalReplicationPool.resyncer.RLock()
data, ok := globalReplicationPool.resyncer.statusMap[opts.bucket]
globalReplicationPool.resyncer.RUnlock()
if !ok {
data, err = loadBucketResyncMetadata(ctx, opts.bucket, objAPI)
if err != nil {
return err
}
}
// validate if resync is in progress for this arn
for tArn, st := range data.TargetsMap {
if opts.arn == tArn && (st.ResyncStatus == ResyncStarted || st.ResyncStatus == ResyncPending) {
return fmt.Errorf("Resync of bucket %s is already in progress for remote bucket %s", opts.bucket, opts.arn)
}
}
status := TargetReplicationResyncStatus{
ResyncID: opts.resyncID,
ResyncBeforeDate: opts.resyncBefore,
StartTime: UTCNow(),
ResyncStatus: ResyncPending,
Bucket: opts.bucket,
}
data.TargetsMap[opts.arn] = status
if err = saveResyncStatus(ctx, opts.bucket, data, objAPI); err != nil {
return err
}
globalReplicationPool.resyncer.Lock()
defer globalReplicationPool.resyncer.Unlock()
brs, ok := globalReplicationPool.resyncer.statusMap[opts.bucket]
if !ok {
brs = BucketReplicationResyncStatus{
Version: resyncMetaVersion,
TargetsMap: make(map[string]TargetReplicationResyncStatus),
}
}
brs.TargetsMap[opts.arn] = status
globalReplicationPool.resyncer.statusMap[opts.bucket] = brs
go globalReplicationPool.resyncer.resyncBucket(GlobalContext, objAPI, false, opts)
return nil
}
func (s *replicationResyncer) trace(resyncID string, path string) func(err error) {
startTime := time.Now()
return func(err error) {
duration := time.Since(startTime)
if globalTrace.NumSubscribers(madmin.TraceReplicationResync) > 0 {
globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err))
}
}
}
func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
funcName := fmt.Sprintf("replication.(resyncID=%s)", resyncID)
return madmin.TraceInfo{
TraceType: madmin.TraceReplicationResync,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: funcName,
Duration: duration,
Path: path,
Error: errStr,
}
}
// delete resync metadata from replication resync state in memory
func (p *ReplicationPool) deleteResyncMetadata(ctx context.Context, bucket string) {
if p == nil {
return
}
p.resyncer.Lock()
defer p.resyncer.Unlock()
delete(p.resyncer.statusMap, bucket)
globalSiteResyncMetrics.deleteBucket(bucket)
}
// initResync - initializes bucket replication resync for all buckets.
func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// Load bucket metadata sys in background
go p.startResyncRoutine(ctx, buckets, objAPI)
return nil
}
func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Run the replication resync in a loop
for {
if err := p.loadResync(ctx, buckets, objAPI); err == nil {
<-ctx.Done()
return
}
duration := time.Duration(r.Float64() * float64(time.Minute))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
time.Sleep(duration)
}
}
// Loads bucket replication resync statuses into memory.
func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
// Make sure only one node is running resync on the cluster.
ctx, cancel := globalLeaderLock.GetLock(ctx)
defer cancel()
for index := range buckets {
meta, err := loadBucketResyncMetadata(ctx, buckets[index].Name, objAPI)
if err != nil {
if !errors.Is(err, errVolumeNotFound) {
logger.LogIf(ctx, err)
}
continue
}
p.resyncer.Lock()
p.resyncer.statusMap[buckets[index].Name] = meta
p.resyncer.Unlock()
}
for index := range buckets {
bucket := buckets[index].Name
var tgts map[string]TargetReplicationResyncStatus
p.resyncer.RLock()
m, ok := p.resyncer.statusMap[bucket]
if ok {
tgts = m.cloneTgtStats()
}
p.resyncer.RUnlock()
for arn, st := range tgts {
switch st.ResyncStatus {
case ResyncFailed, ResyncStarted, ResyncPending:
go p.resyncer.resyncBucket(ctx, objAPI, true, resyncOpts{
bucket: bucket,
arn: arn,
resyncID: st.ResyncID,
resyncBefore: st.ResyncBeforeDate,
})
}
}
}
return nil
}
// load bucket resync metadata from disk
func loadBucketResyncMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) (brs BucketReplicationResyncStatus, e error) {
brs = newBucketResyncStatus(bucket)
resyncDirPath := path.Join(bucketMetaPrefix, bucket, replicationDir)
data, err := readConfig(GlobalContext, objAPI, pathJoin(resyncDirPath, resyncFileName))
if err != nil && err != errConfigNotFound {
return brs, err
}
if len(data) == 0 {
// Seems to be empty.
return brs, nil
}
if len(data) <= 4 {
return brs, fmt.Errorf("replication resync: no data")
}
// Read resync meta header
switch binary.LittleEndian.Uint16(data[0:2]) {
case resyncMetaFormat:
default:
return brs, fmt.Errorf("resyncMeta: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case resyncMetaVersion:
default:
return brs, fmt.Errorf("resyncMeta: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
if _, err = brs.UnmarshalMsg(data[4:]); err != nil {
return brs, err
}
switch brs.Version {
case resyncMetaVersionV1:
default:
return brs, fmt.Errorf("unexpected resync meta version: %d", brs.Version)
}
return brs, nil
}
// save resync status to resync.bin
func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationResyncStatus, objectAPI ObjectLayer) error {
data := make([]byte, 4, brs.Msgsize()+4)
// Initialize the resync meta header.
binary.LittleEndian.PutUint16(data[0:2], resyncMetaFormat)
binary.LittleEndian.PutUint16(data[2:4], resyncMetaVersion)
buf, err := brs.MarshalMsg(data)
if err != nil {
return err
}
configFile := path.Join(bucketMetaPrefix, bucket, replicationDir, resyncFileName)
return saveConfig(ctx, objectAPI, configFile, buf)
}
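// Layout sketch (not part of the original code): resync.bin starts with a
// 4-byte header followed by the msgp-encoded status, mirroring what
// loadBucketResyncMetadata validates above:
//
//	bytes 0-1: resyncMetaFormat  (uint16, little endian)
//	bytes 2-3: resyncMetaVersion (uint16, little endian)
//	bytes 4.. : BucketReplicationResyncStatus (MessagePack)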
// getReplicationDiff returns un-replicated objects in a channel.
// If a non-nil channel is returned it must be consumed fully or
// the provided context must be canceled.
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
objInfoCh := make(chan ObjectInfo, 10)
if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, ObjectOptions{}); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
rcfg := replicationConfig{
Config: cfg,
remotes: tgts,
}
diffCh := make(chan madmin.DiffInfo, 4000)
go func() {
defer close(diffCh)
for obj := range objInfoCh {
if contextCanceled(ctx) {
// Just consume input...
continue
}
// Ignore object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) {
continue
}
roi := getHealReplicateObjectInfo(obj, rcfg)
switch roi.ReplicationStatus {
case replication.Completed, replication.Replica:
if !opts.Verbose {
continue
}
fallthrough
default:
// ignore pre-existing objects that don't satisfy replication rule(s)
if roi.ReplicationStatus.Empty() && !roi.ExistingObjResync.mustResync() {
continue
}
tgtsMap := make(map[string]madmin.TgtDiffInfo)
for arn, st := range roi.TargetStatuses {
if opts.ARN == "" || opts.ARN == arn {
if !opts.Verbose && (st == replication.Completed || st == replication.Replica) {
continue
}
tgtsMap[arn] = madmin.TgtDiffInfo{
ReplicationStatus: st.String(),
}
}
}
for arn, st := range roi.TargetPurgeStatuses {
if opts.ARN == "" || opts.ARN == arn {
if !opts.Verbose && st == Complete {
continue
}
t, ok := tgtsMap[arn]
if !ok {
t = madmin.TgtDiffInfo{}
}
t.DeleteReplicationStatus = string(st)
tgtsMap[arn] = t
}
}
select {
case diffCh <- madmin.DiffInfo{
Object: obj.Name,
VersionID: obj.VersionID,
LastModified: obj.ModTime,
IsDeleteMarker: obj.DeleteMarker,
ReplicationStatus: string(roi.ReplicationStatus),
DeleteReplicationStatus: string(roi.VersionPurgeStatus),
ReplicationTimestamp: roi.ReplicationTimestamp,
Targets: tgtsMap,
}:
case <-ctx.Done():
continue
}
}
}
}()
return diffCh, nil
}
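// Usage sketch (hypothetical caller, not part of the original code): the
// returned channel must be drained fully, or the passed context canceled,
// otherwise the producing goroutine keeps running:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	diffCh, err := getReplicationDiff(ctx, objAPI, "mybucket", madmin.ReplDiffOpts{Verbose: true})
//	if err != nil {
//		return err
//	}
//	for d := range diffCh {
//		fmt.Println(d.Object, d.ReplicationStatus)
//	}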
// QueueReplicationHeal is a wrapper for queueReplicationHeal
func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo) {
// un-versioned or a prefix
if oi.VersionID == "" || oi.ModTime.IsZero() {
return
}
rcfg, _ := getReplicationConfig(ctx, bucket)
tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
queueReplicationHeal(ctx, bucket, oi, replicationConfig{
Config: rcfg,
remotes: tgts,
})
}
// queueReplicationHeal enqueues objects that failed replication OR eligible for resyncing through
// an ongoing resync operation or via existing objects replication configuration setting.
func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcfg replicationConfig) (roi ReplicateObjectInfo) {
// un-versioned or a prefix
if oi.VersionID == "" || oi.ModTime.IsZero() {
return roi
}
if rcfg.Config == nil || rcfg.remotes == nil {
return roi
}
roi = getHealReplicateObjectInfo(oi, rcfg)
if !roi.Dsc.ReplicateAny() {
return
}
// early return if replication already done, otherwise we need to determine if this
// version is an existing object that needs healing.
if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() {
return
}
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
versionID := ""
dmVersionID := ""
if roi.VersionPurgeStatus.Empty() {
dmVersionID = roi.VersionID
} else {
versionID = roi.VersionID
}
dv := DeletedObjectReplicationInfo{
DeletedObject: DeletedObject{
ObjectName: roi.Name,
DeleteMarkerVersionID: dmVersionID,
VersionID: versionID,
ReplicationState: roi.getReplicationState(roi.Dsc.String(), versionID, true),
DeleteMarkerMTime: DeleteMarkerMTime{roi.ModTime},
DeleteMarker: roi.DeleteMarker,
},
Bucket: roi.Bucket,
OpType: replication.HealReplicationType,
EventType: ReplicateHealDelete,
}
// heal delete marker replication failure or versioned delete replication failure
if roi.ReplicationStatus == replication.Pending ||
roi.ReplicationStatus == replication.Failed ||
roi.VersionPurgeStatus == Failed || roi.VersionPurgeStatus == Pending {
globalReplicationPool.queueReplicaDeleteTask(dv)
return
}
// if replication status is Complete on DeleteMarker and existing object resync required
if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) {
queueReplicateDeletesWrapper(dv, roi.ExistingObjResync)
return
}
return
}
if roi.ExistingObjResync.mustResync() {
roi.OpType = replication.ExistingObjectReplicationType
}
switch roi.ReplicationStatus {
case replication.Pending, replication.Failed:
roi.EventType = ReplicateHeal
globalReplicationPool.queueReplicaTask(roi)
return
}
if roi.ExistingObjResync.mustResync() {
roi.EventType = ReplicateExisting
globalReplicationPool.queueReplicaTask(roi)
}
return
}
const mrfTimeInterval = 5 * time.Minute
func (p *ReplicationPool) persistMRF() {
if !p.initialized() {
return
}
var mu sync.Mutex
entries := make(map[string]MRFReplicateEntry)
mTimer := time.NewTimer(mrfTimeInterval)
defer mTimer.Stop()
saveMRFToDisk := func(drain bool) {
mu.Lock()
defer mu.Unlock()
if len(entries) == 0 {
return
}
cctx := p.ctx
if drain {
cctx = context.Background()
// drain all mrf entries and save to disk
for e := range p.mrfSaveCh {
entries[e.versionID] = e
}
}
if err := p.saveMRFEntries(cctx, entries); err != nil {
logger.LogOnceIf(p.ctx, fmt.Errorf("Unable to persist replication failures to disk:%w", err), string(replicationSubsystem))
}
entries = make(map[string]MRFReplicateEntry)
}
for {
select {
case <-mTimer.C:
saveMRFToDisk(false)
mTimer.Reset(mrfTimeInterval)
case <-p.ctx.Done():
p.mrfStopCh <- struct{}{}
close(p.mrfSaveCh)
saveMRFToDisk(true)
return
case <-p.saveStateCh:
saveMRFToDisk(true)
return
case e, ok := <-p.mrfSaveCh:
if !ok {
return
}
var cnt int
mu.Lock()
entries[e.versionID] = e
cnt = len(entries)
mu.Unlock()
if cnt >= cap(p.mrfSaveCh) || len(p.mrfSaveCh) >= int(0.8*float32(cap(p.mrfSaveCh))) {
saveMRFToDisk(true)
}
}
}
}
func (p *ReplicationPool) queueMRFSave(entry MRFReplicateEntry) {
if !p.initialized() {
return
}
select {
case <-GlobalContext.Done():
return
case <-p.mrfStopCh:
return
default:
select {
case p.mrfSaveCh <- entry:
default:
}
}
}
// save mrf entries to mrf_<uuid>.bin
func (p *ReplicationPool) saveMRFEntries(ctx context.Context, entries map[string]MRFReplicateEntry) error {
if !p.initialized() {
return nil
}
if len(entries) == 0 {
return nil
}
v := MRFReplicateEntries{
Entries: entries,
Version: mrfMetaVersionV1,
}
data := make([]byte, 4, v.Msgsize()+4)
// Initialize the MRF meta header, matching what loadMRF expects.
binary.LittleEndian.PutUint16(data[0:2], mrfMetaFormat)
binary.LittleEndian.PutUint16(data[2:4], mrfMetaVersion)
buf, err := v.MarshalMsg(data)
if err != nil {
return err
}
configFile := path.Join(replicationMRFDir, mustGetUUID()+".bin")
err = saveConfig(ctx, p.objLayer, configFile, buf)
return err
}
// load mrf entries from disk
func (p *ReplicationPool) loadMRF(fileName string) (re MRFReplicateEntries, e error) {
if !p.initialized() {
return re, nil
}
data, err := readConfig(p.ctx, p.objLayer, fileName)
if err != nil && err != errConfigNotFound {
return re, err
}
if len(data) == 0 {
// Seems to be empty.
return re, nil
}
if len(data) <= 4 {
return re, fmt.Errorf("replication mrf: no data")
}
// Read resync meta header
switch binary.LittleEndian.Uint16(data[0:2]) {
case mrfMetaFormat:
default:
return re, fmt.Errorf("replication mrf: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case mrfMetaVersion:
default:
return re, fmt.Errorf("replication mrf: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
if _, err = re.UnmarshalMsg(data[4:]); err != nil {
return re, err
}
switch re.Version {
case mrfMetaVersionV1:
default:
return re, fmt.Errorf("unexpected mrf meta version: %d", re.Version)
}
return re, nil
}
func (p *ReplicationPool) processMRF() {
if !p.initialized() {
return
}
pTimer := time.NewTimer(mrfTimeInterval)
defer pTimer.Stop()
for {
select {
case <-pTimer.C:
// skip healing if all targets are offline
var offlineCnt int
tgts := globalBucketTargetSys.ListTargets(p.ctx, "", "")
for _, tgt := range tgts {
if globalBucketTargetSys.isOffline(tgt.URL()) {
offlineCnt++
}
}
if len(tgts) == offlineCnt {
pTimer.Reset(mrfTimeInterval)
continue
}
objCh := make(chan ObjectInfo)
cctx, cancelFn := context.WithCancel(p.ctx)
if err := p.objLayer.Walk(cctx, minioMetaBucket, replicationMRFDir, objCh, ObjectOptions{}); err != nil {
pTimer.Reset(mrfTimeInterval)
cancelFn()
logger.LogIf(p.ctx, err)
continue
}
for item := range objCh {
if err := p.queueMRFHeal(item.Name); err == nil {
p.objLayer.DeleteObject(p.ctx, minioMetaBucket, item.Name, ObjectOptions{})
}
}
pTimer.Reset(mrfTimeInterval)
cancelFn()
case <-p.ctx.Done():
return
}
}
}
// queueMRFHeal loads the MRF entries from the given file and queues a replication heal for each entry.
func (p *ReplicationPool) queueMRFHeal(file string) error {
if !p.initialized() {
return errServerNotInitialized
}
mrfRec, err := p.loadMRF(file)
if err != nil {
return err
}
for vID, e := range mrfRec.Entries {
oi, err := p.objLayer.GetObjectInfo(p.ctx, e.Bucket, e.Object, ObjectOptions{
VersionID: vID,
})
if err != nil {
continue
}
QueueReplicationHeal(p.ctx, e.Bucket, oi)
}
return nil
}
// load replication stats from disk
func (p *ReplicationPool) loadStatsFromDisk() (rs map[string]BucketReplicationStats, e error) {
if !p.initialized() {
return map[string]BucketReplicationStats{}, nil
}
data, err := readConfig(p.ctx, p.objLayer, getReplicationStatsPath())
if err != nil {
if errors.Is(err, errConfigNotFound) {
return rs, nil
}
return rs, err
}
if len(data) <= 4 {
logger.LogIf(p.ctx, fmt.Errorf("replication stats: no data"))
return map[string]BucketReplicationStats{}, nil
}
// Read repl stats meta header
switch binary.LittleEndian.Uint16(data[0:2]) {
case replStatsMetaFormat:
default:
return rs, fmt.Errorf("replication stats: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case replStatsVersion:
default:
return rs, fmt.Errorf("replication stats: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
ss := BucketStatsMap{}
if _, err = ss.UnmarshalMsg(data[4:]); err != nil {
return rs, err
}
rs = make(map[string]BucketReplicationStats, len(ss.Stats))
for bucket, st := range ss.Stats {
rs[bucket] = st.ReplicationStats
}
return rs, nil
}
func (p *ReplicationPool) initialized() bool {
return !(p == nil || p.objLayer == nil)
}
func (p *ReplicationPool) saveStatsToDisk() {
if !p.initialized() {
return
}
ctx, cancel := globalLeaderLock.GetLock(p.ctx)
defer cancel()
sTimer := time.NewTimer(replStatsSaveInterval)
defer sTimer.Stop()
for {
select {
case <-sTimer.C:
dui, err := loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
if err == nil && !dui.LastUpdate.IsZero() {
globalReplicationStats.getAllLatest(dui.BucketsUsage)
}
p.saveStats(p.ctx)
sTimer.Reset(replStatsSaveInterval)
case <-ctx.Done():
return
}
}
}
// save replication stats to .minio.sys/buckets/replication/node-name.stats
func (p *ReplicationPool) saveStats(ctx context.Context) error {
if !p.initialized() {
return nil
}
data, err := globalReplicationStats.serializeStats()
if data == nil {
return err
}
return saveConfig(ctx, p.objLayer, getReplicationStatsPath(), data)
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package amztime implements AWS specific time parsing and deviations
package amztime
import (
"errors"
"net/http"
"time"
)
// Supported amz date formats.
var amzDateFormats = []string{
// Do not change this order, the x-amz-date format is usually in
// iso8601Format; the rest are meant for relaxed handling of other
// odd SDKs that might be out there.
"20060102T150405Z",
time.RFC1123,
time.RFC1123Z,
// Add new AMZ date formats here.
}
// ErrMalformedDate always returned for dates that cannot be parsed.
var ErrMalformedDate = errors.New("malformed date")
// Parse parses date string via supported amz date formats.
func Parse(amzDateStr string) (time.Time, error) {
for _, dateFormat := range amzDateFormats {
amzDate, err := time.Parse(dateFormat, amzDateStr)
if err == nil {
return amzDate, nil
}
}
return time.Time{}, ErrMalformedDate
}
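// Usage sketch (not part of the original code): the first format matches the
// ISO 8601 style value carried in the X-Amz-Date header, e.g.
//
//	t, err := Parse("20130524T000000Z")
//	// t == 2013-05-24 00:00:00 +0000 UTC, err == nil
//
// RFC1123 style dates sent by older or odd SDKs are also accepted.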
var httpTimeFormats = []string{
// Do not change this order, http time format dates
// are usually in http.TimeFormat, however there are
// situations where for example aws-sdk-java doesn't
// send the correct format.
http.TimeFormat,
"Mon, 2 Jan 2006 15:04:05 GMT",
}
// ParseHeader parses http.TimeFormat with an acceptable
// extension for http.TimeFormat - return time might be zero
// if the timeStr is invalid.
func ParseHeader(timeStr string) (time.Time, error) {
for _, dateFormat := range httpTimeFormats {
t, err := time.Parse(dateFormat, timeStr)
if err == nil {
return t, nil
}
}
return time.Time{}, ErrMalformedDate
}
// ParseReplicationTS parses the timestamp using http.TimeFormat first and
// falls back to time.RFC3339Nano when parsing with http.TimeFormat fails.
func ParseReplicationTS(str string) (time.Time, error) {
tm, err := time.Parse(http.TimeFormat, str)
if tm.IsZero() || err != nil {
tm, err = time.Parse(time.RFC3339Nano, str)
}
return tm, err
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ioutil
import (
"errors"
"io"
"os"
"sync"
"syscall"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/disk"
)
// ODirectReader - to support O_DIRECT reads for erasure backends.
type ODirectReader struct {
File *os.File
SmallFile bool
err error
buf []byte
bufp *[]byte
seenRead bool
}
// Block sizes constant.
const (
BlockSizeSmall = 32 * humanize.KiByte // Default r/w block size for smaller objects.
BlockSizeLarge = 2 * humanize.MiByte // Default r/w block size for larger objects.
BlockSizeReallyLarge = 4 * humanize.MiByte // Default write block size for objects per shard >= 64MiB
)
// O_DIRECT aligned sync.Pool's
var (
ODirectPoolXLarge = sync.Pool{
New: func() interface{} {
b := disk.AlignedBlock(BlockSizeReallyLarge)
return &b
},
}
ODirectPoolLarge = sync.Pool{
New: func() interface{} {
b := disk.AlignedBlock(BlockSizeLarge)
return &b
},
}
ODirectPoolSmall = sync.Pool{
New: func() interface{} {
b := disk.AlignedBlock(BlockSizeSmall)
return &b
},
}
)
// isSysErrInvalidArg returns true for EINVAL, e.g. when unsupported flags such as O_DIRECT are used.
func isSysErrInvalidArg(err error) bool {
return errors.Is(err, syscall.EINVAL)
}
// Read - Implements Reader interface.
func (o *ODirectReader) Read(buf []byte) (n int, err error) {
if o.err != nil && (len(o.buf) == 0 || !o.seenRead) {
return 0, o.err
}
if o.buf == nil {
if o.SmallFile {
o.bufp = ODirectPoolSmall.Get().(*[]byte)
} else {
o.bufp = ODirectPoolLarge.Get().(*[]byte)
}
}
if !o.seenRead {
o.buf = *o.bufp
n, err = o.File.Read(o.buf)
if err != nil && err != io.EOF {
if isSysErrInvalidArg(err) {
if err = disk.DisableDirectIO(o.File); err != nil {
o.err = err
return n, err
}
n, err = o.File.Read(o.buf)
}
if err != nil && err != io.EOF {
o.err = err
return n, err
}
}
if n == 0 {
// err is likely io.EOF
o.err = err
return n, err
}
o.err = err
o.buf = o.buf[:n]
o.seenRead = true
}
if len(buf) >= len(o.buf) {
n = copy(buf, o.buf)
o.seenRead = false
return n, o.err
}
n = copy(buf, o.buf)
o.buf = o.buf[n:]
// There is more left in buffer, do not return any EOF yet.
return n, nil
}
// Close - Release the buffer and close the file.
func (o *ODirectReader) Close() error {
if o.bufp != nil {
if o.SmallFile {
ODirectPoolSmall.Put(o.bufp)
} else {
ODirectPoolLarge.Put(o.bufp)
}
o.bufp = nil
o.buf = nil
}
o.err = errors.New("internal error: ODirectReader Read after Close")
return o.File.Close()
}
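// Usage sketch (hypothetical, not part of the original code): the caller is
// expected to hand ODirectReader a file opened with O_DIRECT (platform
// specific) and to Close it so the aligned buffer returns to its pool:
//
//	// path, size and dst are assumed to be known to the caller.
//	f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_DIRECT, 0)
//	if err != nil {
//		return err
//	}
//	r := &ODirectReader{File: f, SmallFile: size <= BlockSizeSmall}
//	defer r.Close()
//	_, err = io.Copy(dst, r)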
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"net"
"net/http"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/mcontext"
)
var ldapPwdRegex = regexp.MustCompile("(^.*?)LDAPPassword=([^&]*?)(&(.*?))?$")
// redact LDAP password if part of string
func redactLDAPPwd(s string) string {
parts := ldapPwdRegex.FindStringSubmatch(s)
if len(parts) > 3 {
return parts[1] + "LDAPPassword=*REDACTED*" + parts[3]
}
return s
}
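// Example (not part of the original code):
//
//	redactLDAPPwd("Action=AssumeRoleWithLDAPIdentity&LDAPUsername=foo&LDAPPassword=secret&Version=2011-06-15")
//	// "Action=AssumeRoleWithLDAPIdentity&LDAPUsername=foo&LDAPPassword=*REDACTED*&Version=2011-06-15"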
// getOpName sanitizes the operation name for mc
func getOpName(name string) (op string) {
op = strings.TrimPrefix(name, "github.com/minio/minio/cmd.")
op = strings.TrimSuffix(op, "Handler-fm")
op = strings.Replace(op, "objectAPIHandlers", "s3", 1)
op = strings.Replace(op, "adminAPIHandlers", "admin", 1)
op = strings.Replace(op, "(*storageRESTServer)", "storageR", 1)
op = strings.Replace(op, "(*peerRESTServer)", "peer", 1)
op = strings.Replace(op, "(*lockRESTServer)", "lockR", 1)
op = strings.Replace(op, "(*stsAPIHandlers)", "sts", 1)
op = strings.Replace(op, "ClusterCheckHandler", "health.Cluster", 1)
op = strings.Replace(op, "ClusterReadCheckHandler", "health.ClusterRead", 1)
op = strings.Replace(op, "LivenessCheckHandler", "health.Liveness", 1)
op = strings.Replace(op, "ReadinessCheckHandler", "health.Readiness", 1)
op = strings.Replace(op, "-fm", "", 1)
return op
}
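// Example (not part of the original code): a handler method value whose
// runtime name is "github.com/minio/minio/cmd.objectAPIHandlers.GetObjectHandler-fm"
// is reported in traces as:
//
//	getOpName("github.com/minio/minio/cmd.objectAPIHandlers.GetObjectHandler-fm") // "s3.GetObject"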
// httpTracer wraps a handler to record the request and response, and publishes a
// trace event (S3 or internal) when trace subscribers are registered.
func httpTracer(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Setup a http request response recorder - this is needed for
// http stats requests and audit if enabled.
respRecorder := xhttp.NewResponseRecorder(w)
// Setup a http request body recorder
reqRecorder := &xhttp.RequestRecorder{Reader: r.Body}
r.Body = reqRecorder
// Create tracing data structure and associate it to the request context
tc := mcontext.TraceCtxt{
AmzReqID: r.Header.Get(xhttp.AmzRequestID),
RequestRecorder: reqRecorder,
ResponseRecorder: respRecorder,
}
r = r.WithContext(context.WithValue(r.Context(), mcontext.ContextTraceKey, &tc))
reqStartTime := time.Now().UTC()
h.ServeHTTP(respRecorder, r)
reqEndTime := time.Now().UTC()
if globalTrace.NumSubscribers(madmin.TraceS3|madmin.TraceInternal) == 0 {
// no subscribers nothing to trace.
return
}
tt := madmin.TraceInternal
if strings.HasPrefix(tc.FuncName, "s3.") {
tt = madmin.TraceS3
}
// Calculate input body size with headers
reqHeaders := r.Header.Clone()
reqHeaders.Set("Host", r.Host)
if len(r.TransferEncoding) == 0 {
reqHeaders.Set("Content-Length", strconv.Itoa(int(r.ContentLength)))
} else {
reqHeaders.Set("Transfer-Encoding", strings.Join(r.TransferEncoding, ","))
}
inputBytes := reqRecorder.Size()
for k, v := range reqHeaders {
inputBytes += len(k) + len(v)
}
// Calculate node name
nodeName := r.Host
if globalIsDistErasure {
nodeName = globalLocalNodeName
}
if host, port, err := net.SplitHostPort(nodeName); err == nil {
if port == "443" || port == "80" {
nodeName = host
}
}
// Calculate reqPath
reqPath := r.URL.RawPath
if reqPath == "" {
reqPath = r.URL.Path
}
// Calculate function name
funcName := tc.FuncName
if funcName == "" {
funcName = "<unknown>"
}
t := madmin.TraceInfo{
TraceType: tt,
FuncName: funcName,
NodeName: nodeName,
Time: reqStartTime,
Duration: reqEndTime.Sub(respRecorder.StartTime),
Path: reqPath,
HTTP: &madmin.TraceHTTPStats{
ReqInfo: madmin.TraceRequestInfo{
Time: reqStartTime,
Proto: r.Proto,
Method: r.Method,
RawQuery: redactLDAPPwd(r.URL.RawQuery),
Client: handlers.GetSourceIP(r),
Headers: reqHeaders,
Path: reqPath,
Body: reqRecorder.Data(),
},
RespInfo: madmin.TraceResponseInfo{
Time: reqEndTime,
Headers: respRecorder.Header().Clone(),
StatusCode: respRecorder.StatusCode,
Body: respRecorder.Body(),
},
CallStats: madmin.TraceCallStats{
Latency: reqEndTime.Sub(respRecorder.StartTime),
InputBytes: inputBytes,
OutputBytes: respRecorder.Size(),
TimeToFirstByte: respRecorder.TimeToFirstByte,
},
},
}
globalTrace.Publish(t)
})
}
func httpTrace(f http.HandlerFunc, logBody bool) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
if !ok {
// Tracing is not enabled for this request
f.ServeHTTP(w, r)
return
}
tc.FuncName = getOpName(runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name())
tc.RequestRecorder.LogBody = logBody
tc.ResponseRecorder.LogAllBody = logBody
tc.ResponseRecorder.LogErrBody = true
f.ServeHTTP(w, r)
}
}
func httpTraceAll(f http.HandlerFunc) http.HandlerFunc {
return httpTrace(f, true)
}
func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
return httpTrace(f, false)
}
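// Usage sketch (hypothetical handler names, not part of the original code):
// handlers are wrapped when routes are registered, e.g.
//
//	mux.HandleFunc("/minio/health/live", httpTraceAll(livenessHandler))
//	mux.HandleFunc("/minio/storage/data", httpTraceHdrs(storageHandler))
//
// httpTraceAll records request and response bodies in the trace, while
// httpTraceHdrs records headers only (error response bodies are still captured).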
<file_sep>#!/usr/bin/env bash
set -e
export GORACE="history_size=7"
export MINIO_API_REQUESTS_MAX=10000
for d in $(go list ./...); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
done
<file_sep>//go:build linux
// +build linux
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"net"
"syscall"
"time"
)
// CheckPortAvailability - checks whether the given host and port are already in use.
// Note: the check tries to listen on the given port and then closes the listener,
// so a client connecting within this tiny window of time may be disconnected.
func CheckPortAvailability(host, port string, opts TCPOptions) (err error) {
lc := &net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
c.Control(func(fdPtr uintptr) {
if opts.Interface != "" {
// When interface is specified look for specifically port availability on
// the specified interface if any.
_ = syscall.SetsockoptString(int(fdPtr), syscall.SOL_SOCKET, syscall.SO_BINDTODEVICE, opts.Interface)
}
})
return nil
},
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
l, err := lc.Listen(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
return err
}
// As we are able to listen on this address, the port is not in use.
// Close the listener and return.
return l.Close()
}
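// Usage sketch (hypothetical, not part of the original code):
//
//	if err := CheckPortAvailability("0.0.0.0", "9000", TCPOptions{}); err != nil {
//		log.Fatalln("port 9000 is not available:", err)
//	}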
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"github.com/minio/minio/internal/bucket/replication"
)
var replicatedInfosTests = []struct {
name string
tgtInfos []replicatedTargetInfo
expectedCompletedSize int64
expectedReplicationStatusInternal string
expectedReplicationStatus replication.StatusType
expectedOpType replication.Type
expectedAction replicationAction
}{
{ // 1. empty tgtInfos slice
name: "no replicated targets",
tgtInfos: []replicatedTargetInfo{},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "",
expectedReplicationStatus: replication.StatusType(""),
expectedOpType: replication.UnsetReplicationType,
expectedAction: replicateNone,
},
{ // 2. replication completed to single target
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;",
expectedReplicationStatus: replication.Completed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ // 3. replication completed to single target; failed to another
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Completed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
{ // 4. replication pending on one target; failed to another
name: "replication completed to single target",
tgtInfos: []replicatedTargetInfo{
{
Arn: "arn1",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Pending,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
{
Arn: "arn2",
Size: 249,
PrevReplicationStatus: replication.Pending,
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
},
},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,
expectedOpType: replication.ObjectReplicationType,
expectedAction: replicateAll,
},
}
func TestReplicatedInfos(t *testing.T) {
for i, test := range replicatedInfosTests {
rinfos := replicatedInfos{
Targets: test.tgtInfos,
}
if actualSize := rinfos.CompletedSize(); actualSize != test.expectedCompletedSize {
t.Errorf("Test%d (%s): Size got %d , want %d", i+1, test.name, actualSize, test.expectedCompletedSize)
}
if repStatusStr := rinfos.ReplicationStatusInternal(); repStatusStr != test.expectedReplicationStatusInternal {
t.Errorf("Test%d (%s): Internal replication status got %s , want %s", i+1, test.name, repStatusStr, test.expectedReplicationStatusInternal)
}
if repStatus := rinfos.ReplicationStatus(); repStatus != test.expectedReplicationStatus {
t.Errorf("Test%d (%s): ReplicationStatus got %s , want %s", i+1, test.name, repStatus, test.expectedReplicationStatus)
}
if action := rinfos.Action(); action != test.expectedAction {
t.Errorf("Test%d (%s): Action got %s , want %s", i+1, test.name, action, test.expectedAction)
}
}
}
var parseReplicationDecisionTest = []struct {
name string
dsc string
expDsc ReplicateDecision
expErr error
}{
{ // 1.
name: "empty string",
dsc: "",
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{},
},
expErr: nil,
},
{ // 2.
name: "replicate decision for one target",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
{ // 3.
name: "replicate decision for multiple targets",
dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
expErr: nil,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
"arn:minio:replication::id2:bucket": newReplicateTargetDecision("arn:minio:replication::id2:bucket", false, true),
},
},
},
{ // 4.
name: "invalid format replicate decision for one target",
dsc: "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
expErr: errInvalidReplicateDecisionFormat,
expDsc: ReplicateDecision{
targetsMap: map[string]replicateTargetDecision{
"arn:minio:replication::id:bucket": newReplicateTargetDecision("arn:minio:replication::id:bucket", true, false),
},
},
},
}
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
dsc, err := parseReplicateDecision(test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
}
continue
}
if len(dsc.targetsMap) != len(test.expDsc.targetsMap) {
t.Errorf("Test%d (%s): Invalid number of entries in targetsMap got %d , want %d", i+1, test.name, len(dsc.targetsMap), len(test.expDsc.targetsMap))
}
for arn, tdsc := range dsc.targetsMap {
expDsc, ok := test.expDsc.targetsMap[arn]
if !ok || expDsc != tdsc {
t.Errorf("Test%d (%s): Invalid target replicate decision: got %+v, want %+v", i+1, test.name, tdsc, expDsc)
}
}
}
}
var replicationStateTest = []struct {
name string
rs ReplicationState
arn string
expStatus replication.StatusType
}{
{ // 1. no replication status header
name: "no replicated targets",
rs: ReplicationState{},
expStatus: replication.StatusType(""),
},
{ // 2. replication status for one target
name: "replication status for one target",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
expStatus: replication.Pending,
},
{ // 3. replication status for one target - incorrect format
name: "replication status for one target",
rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
expStatus: replication.StatusType(""),
},
{ // 4. replication status for 3 targets, one of them failed
name: "replication status for 3 targets - one failed",
rs: ReplicationState{
ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
Targets: map[string]replication.StatusType{"arn1": "COMPLETED", "arn2": "COMPLETED", "arn3": "FAILED"},
},
expStatus: replication.Failed,
},
{ // 5. replication status for replica version
name: "replication status for replica version",
rs: ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
expStatus: replication.Replica,
},
}
func TestCompositeReplicationStatus(t *testing.T) {
for i, test := range replicationStateTest {
if rstatus := test.rs.CompositeReplicationStatus(); rstatus != test.expStatus {
t.Errorf("Test%d (%s): Overall replication status got %s , want %s", i+1, test.name, rstatus, test.expStatus)
}
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"net/http"
"strings"
)
// newUnsignedV4ChunkedReader returns a new s3UnsignedChunkedReader that translates the data read from req.Body
// out of HTTP "chunked" format before returning it.
// The s3UnsignedChunkedReader returns io.EOF when the final 0-length chunk is read.
func newUnsignedV4ChunkedReader(req *http.Request, trailer bool) (io.ReadCloser, APIErrorCode) {
if trailer {
// Discard anything unsigned.
req.Trailer = make(http.Header)
trailers := req.Header.Values(awsTrailerHeader)
for _, key := range trailers {
req.Trailer.Add(key, "")
}
} else {
req.Trailer = nil
}
return &s3UnsignedChunkedReader{
trailers: req.Trailer,
reader: bufio.NewReader(req.Body),
buffer: make([]byte, 64*1024),
}, ErrNone
}
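// For reference, an illustrative unsigned aws-chunked payload as decoded by this
// reader (a minimal sketch; the exact bytes depend on the client, and the optional
// trailer section is omitted here):
//
//	3\r\n      chunk size in hex
//	abc\r\n    chunk payload, terminated by CRLF
//	0\r\n      final zero-sized chunk signals end of stream
//
// Read returns the concatenated chunk payloads ("abc" in this sketch) and io.EOF
// once the zero-sized chunk (and any expected trailers) has been consumed.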
// s3UnsignedChunkedReader represents the overall state that is required for
// decoding an unsigned AWS chunked ("aws-chunked") payload.
type s3UnsignedChunkedReader struct {
reader *bufio.Reader
trailers http.Header
buffer []byte
offset int
err error
debug bool
}
func (cr *s3UnsignedChunkedReader) Close() (err error) {
return cr.err
}
// Read - implements `io.Reader`, which transparently decodes
// the incoming unsigned, chunked content.
func (cr *s3UnsignedChunkedReader) Read(buf []byte) (n int, err error) {
// First, if there is any unread data, copy it to the client
// provided buffer.
if cr.offset > 0 {
n = copy(buf, cr.buffer[cr.offset:])
if n == len(buf) {
cr.offset += n
return n, nil
}
cr.offset = 0
buf = buf[n:]
}
// mustRead reads from input and compares against provided slice.
mustRead := func(b ...byte) error {
for _, want := range b {
got, err := cr.reader.ReadByte()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if got != want {
if cr.debug {
fmt.Printf("mustread: want: %q got: %q\n", string(want), string(got))
}
return errMalformedEncoding
}
if err != nil {
return err
}
}
return nil
}
var size int
for {
b, err := cr.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
cr.err = err
return n, cr.err
}
if b == '\r' { // \r\n denotes end of size.
err := mustRead('\n')
if err != nil {
cr.err = err
return n, cr.err
}
break
}
// Manually deserialize the size since AWS specified
// the chunk size to be of variable width. In particular,
// a size of 16 is encoded as `10` while a size of 64 KB
// is `10000`.
switch {
case b >= '0' && b <= '9':
size = size<<4 | int(b-'0')
case b >= 'a' && b <= 'f':
size = size<<4 | int(b-('a'-10))
case b >= 'A' && b <= 'F':
size = size<<4 | int(b-('A'-10))
default:
if cr.debug {
fmt.Printf("err size: %v\n", string(b))
}
cr.err = errMalformedEncoding
return n, cr.err
}
if size > maxChunkSize {
cr.err = errChunkTooBig
return n, cr.err
}
}
if cap(cr.buffer) < size {
cr.buffer = make([]byte, size)
} else {
cr.buffer = cr.buffer[:size]
}
// Now, we read the payload.
_, err = io.ReadFull(cr.reader, cr.buffer)
if err == io.EOF && size != 0 {
err = io.ErrUnexpectedEOF
}
if err != nil && err != io.EOF {
cr.err = err
return n, cr.err
}
// If the chunk size is zero we return io.EOF. As specified by AWS,
// only the last chunk is zero-sized.
if len(cr.buffer) == 0 {
if cr.debug {
fmt.Println("EOF")
}
if cr.trailers != nil {
err = cr.readTrailers()
if cr.debug {
fmt.Println("trailer returned:", err)
}
if err != nil {
cr.err = err
return 0, err
}
}
cr.err = io.EOF
return n, cr.err
}
// read final terminator.
err = mustRead('\r', '\n')
if err != nil && err != io.EOF {
cr.err = err
return n, cr.err
}
cr.offset = copy(buf, cr.buffer)
n += cr.offset
return n, err
}
// readTrailers will read all trailers and populate cr.trailers with actual values.
func (cr *s3UnsignedChunkedReader) readTrailers() error {
var valueBuffer bytes.Buffer
// Read value
for {
v, err := cr.reader.ReadByte()
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if v != '\r' {
valueBuffer.WriteByte(v)
continue
}
// Must end with \r\n\r\n
var tmp [3]byte
_, err = io.ReadFull(cr.reader, tmp[:])
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
if !bytes.Equal(tmp[:], []byte{'\n', '\r', '\n'}) {
if cr.debug {
fmt.Printf("got %q, want %q\n", string(tmp[:]), "\n\r\n")
}
return errMalformedEncoding
}
break
}
// Parse trailers.
wantTrailers := make(map[string]struct{}, len(cr.trailers))
for k := range cr.trailers {
wantTrailers[strings.ToLower(k)] = struct{}{}
}
input := bufio.NewScanner(bytes.NewReader(valueBuffer.Bytes()))
for input.Scan() {
line := strings.TrimSpace(input.Text())
if line == "" {
continue
}
// Find first separator.
idx := strings.IndexByte(line, trailerKVSeparator[0])
if idx <= 0 || idx >= len(line) {
if cr.debug {
fmt.Printf("Could not find separator, got %q\n", line)
}
return errMalformedEncoding
}
key := strings.ToLower(line[:idx])
value := line[idx+1:]
if _, ok := wantTrailers[key]; !ok {
if cr.debug {
fmt.Printf("Unknown key %q - expected on of %v\n", key, cr.trailers)
}
return errMalformedEncoding
}
cr.trailers.Set(key, value)
delete(wantTrailers, key)
}
// Check if we got all we want.
if len(wantTrailers) > 0 {
return io.ErrUnexpectedEOF
}
return nil
}
<file_sep>#!/bin/bash -e
#
set -E
set -o pipefail
set -x
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO_OLD=("$PWD/minio.RELEASE.2021-11-24T23-19-33Z" --config-dir "$MINIO_CONFIG_DIR" server)
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function download_old_release() {
if [ ! -f minio.RELEASE.2021-11-24T23-19-33Z ]; then
curl --silent -O https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.RELEASE.2021-11-24T23-19-33Z
chmod a+x minio.RELEASE.2021-11-24T23-19-33Z
fi
}
function start_minio_16drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export _MINIO_SHARD_DISKTIME_DELTA="5s" # do not change this as it's needed for tests
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
shred --iterations=1 --size=5241856 - 1>"${WORK_DIR}/unaligned" 2>/dev/null
"${WORK_DIR}/mc" mb minio/healing-shard-bucket --quiet
"${WORK_DIR}/mc" cp \
"${WORK_DIR}/unaligned" \
minio/healing-shard-bucket/unaligned \
--disable-multipart --quiet
## "unaligned" object name gets consistently distributed
## to disks in following distribution order
##
## NOTE: if you change the name make sure to change the
## distribution order present here
##
## [15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
## make sure to remove the "last" data shard
rm -rf "${WORK_DIR}/xl14/healing-shard-bucket/unaligned"
sleep 10
## Heal the shard
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
## then remove any other data shard let's pick first disk
## - 1st data shard.
rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
sleep 10
go build ./docs/debugging/s3-check-md5/
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep CORRUPTED; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
pkill minio
sleep 3
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 30
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
if ! ./s3-check-md5 \
-debug \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
mkdir -p inspects
(
cd inspects
"${WORK_DIR}/mc" support inspect minio/healing-shard-bucket/unaligned/**
)
"${WORK_DIR}/mc" mb play/inspects
"${WORK_DIR}/mc" mirror inspects play/inspects
purge "$WORK_DIR"
exit 1
fi
pkill minio
sleep 3
}
function main() {
download_old_release
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_16drive ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/minio/minio/internal/bucket/lifecycle"
)
// objSweeper determines if a transitioned object needs to be removed from the remote tier.
// A typical usage would be:
// os := newObjSweeper(bucket, object)
// // Perform an ObjectLayer.GetObjectInfo to fetch object version information
// goiOpts := os.GetOpts()
// gerr := objAPI.GetObjectInfo(ctx, bucket, object, goiOpts)
//
// if gerr == nil {
// os.SetTransitionState(goi)
// }
//
// // After the overwriting object operation is complete.
//
// if jentry, ok := os.ShouldRemoveRemoteObject(); ok {
// err := globalTierJournal.AddEntry(jentry)
// logger.LogIf(ctx, err)
// }
type objSweeper struct {
Object string
Bucket string
VersionID string // version ID set by application, applies only to DeleteObject and DeleteObjects APIs
Versioned bool
Suspended bool
TransitionStatus string
TransitionTier string
TransitionVersionID string
RemoteObject string
}
// newObjSweeper returns an objSweeper for a given bucket and object.
// Versioning information is set separately via WithVersion and WithVersioning.
func newObjSweeper(bucket, object string) *objSweeper {
return &objSweeper{
Object: object,
Bucket: bucket,
}
}
// WithVersion sets the version ID from v
func (os *objSweeper) WithVersion(vid string) *objSweeper {
os.VersionID = vid
return os
}
// WithVersioning sets bucket versioning for sweeper.
func (os *objSweeper) WithVersioning(versioned, suspended bool) *objSweeper {
os.Versioned = versioned
os.Suspended = suspended
return os
}
// GetOpts returns ObjectOptions to fetch the object version that may be
// overwritten or deleted depending on bucket versioning status.
func (os *objSweeper) GetOpts() ObjectOptions {
opts := ObjectOptions{
VersionID: os.VersionID,
Versioned: os.Versioned,
VersionSuspended: os.Suspended,
}
if os.Suspended && os.VersionID == "" {
opts.VersionID = nullVersionID
}
return opts
}
// SetTransitionState sets ILM transition related information from given info.
func (os *objSweeper) SetTransitionState(info TransitionedObject) {
os.TransitionTier = info.Tier
os.TransitionStatus = info.Status
os.RemoteObject = info.Name
os.TransitionVersionID = info.VersionID
}
// shouldRemoveRemoteObject determines if a transitioned object should be
// removed from remote tier. If remote object is to be deleted, returns the
// corresponding tier deletion journal entry and true. Otherwise returns empty
// jentry value and false.
func (os *objSweeper) shouldRemoveRemoteObject() (jentry, bool) {
if os.TransitionStatus != lifecycle.TransitionComplete {
return jentry{}, false
}
// 1. If bucket versioning is disabled, remove the remote object.
// 2. If bucket versioning is suspended and
// a. version id is specified, remove its remote object.
// b. version id is not specified, remove null version's remote object if it exists.
// 3. If bucket versioning is enabled and
// a. version id is specified, remove its remote object.
// b. version id is not specified, nothing to be done (a delete marker is added).
delTier := false
switch {
case !os.Versioned, os.Suspended: // 1, 2.a, 2.b
delTier = true
case os.Versioned && os.VersionID != "": // 3.a
delTier = true
}
if delTier {
return jentry{
ObjName: os.RemoteObject,
VersionID: os.TransitionVersionID,
TierName: os.TransitionTier,
}, true
}
return jentry{}, false
}
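// For example, overwriting a transitioned object on an unversioned bucket
// (a minimal sketch; bucket, object and tier names are illustrative):
//
//	os := newObjSweeper("mybucket", "myobject").WithVersioning(false, false)
//	os.SetTransitionState(TransitionedObject{
//		Name:   "myobject",
//		Tier:   "WARM",
//		Status: lifecycle.TransitionComplete,
//	})
//	je, ok := os.shouldRemoveRemoteObject() // ok == true, je.TierName == "WARM"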
// Sweep removes the transitioned object if it's no longer referred to.
func (os *objSweeper) Sweep() error {
if je, ok := os.shouldRemoveRemoteObject(); ok {
return globalTierJournal.AddEntry(je)
}
return nil
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"time"
)
//go:generate msgp -file $GOFILE
// ReplicationLatency holds information on bucket operation latencies, such as uploads
type ReplicationLatency struct {
// Single & Multipart PUTs latency
UploadHistogram LastMinuteHistogram
}
// Merge two replication latencies into a new one
func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
return
}
// Get upload latency of each object size range
func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
ret = make(map[string]uint64)
avg := rl.UploadHistogram.GetAvgData()
for k, v := range avg {
// Convert nanoseconds to milliseconds
ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond)
}
return
}
// Update replication upload latency with a new value
func (rl *ReplicationLatency) update(size int64, duration time.Duration) {
rl.UploadHistogram.Add(size, duration)
}
// BucketStatsMap captures bucket statistics for all buckets
type BucketStatsMap struct {
Stats map[string]BucketStats
Timestamp time.Time
}
// BucketStats bucket statistics
type BucketStats struct {
ReplicationStats BucketReplicationStats
}
// BucketReplicationStats represents inline replication statistics
// such as pending, failed and completed bytes in total for a bucket
type BucketReplicationStats struct {
Stats map[string]*BucketReplicationStat `json:",omitempty"`
// Pending size in bytes
PendingSize int64 `json:"pendingReplicationSize"`
// Completed size in bytes
ReplicatedSize int64 `json:"completedReplicationSize"`
// Total Replica size in bytes
ReplicaSize int64 `json:"replicaSize"`
// Failed size in bytes
FailedSize int64 `json:"failedReplicationSize"`
// Total number of pending operations including metadata updates
PendingCount int64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates
FailedCount int64 `json:"failedReplicationCount"`
}
// Empty returns true if there are no target stats
func (brs *BucketReplicationStats) Empty() bool {
return len(brs.Stats) == 0 && brs.ReplicaSize == 0
}
// Clone creates a new BucketReplicationStats copy
func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
// This is called only by replicationStats cache and already holds a
// read lock before calling Clone()
c = brs
// We need to copy the map, so we do not reference the one in `brs`.
c.Stats = make(map[string]*BucketReplicationStat, len(brs.Stats))
for arn, st := range brs.Stats {
// make a copy of `*st`
s := *st
c.Stats[arn] = &s
}
return c
}
// BucketReplicationStat represents inline replication statistics
// such as pending, failed and completed bytes in total for a bucket
// remote target
type BucketReplicationStat struct {
// Pending size in bytes
PendingSize int64 `json:"pendingReplicationSize"`
// Completed size in bytes
ReplicatedSize int64 `json:"completedReplicationSize"`
// Total Replica size in bytes
ReplicaSize int64 `json:"replicaSize"`
// Failed size in bytes
FailedSize int64 `json:"failedReplicationSize"`
// Total number of pending operations including metadata updates
PendingCount int64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates
FailedCount int64 `json:"failedReplicationCount"`
// Replication latency information
Latency ReplicationLatency `json:"replicationLatency"`
// bandwidth limit for target
BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
// current bandwidth reported
CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}
func (bs *BucketReplicationStat) hasReplicationUsage() bool {
return bs.FailedSize > 0 ||
bs.ReplicatedSize > 0 ||
bs.ReplicaSize > 0 ||
bs.FailedCount > 0 ||
bs.PendingCount > 0 ||
bs.PendingSize > 0
}
func (brs BucketReplicationStats) String() string {
s := "ReplicatedSize=" + fmt.Sprintf("%d", brs.ReplicatedSize) + "+\n ReplicaSize=" + fmt.Sprintf("%d", brs.ReplicaSize)
for arn, st := range brs.Stats {
s += "\n arn: " + arn + " ReplicatedSize=" + fmt.Sprintf("%d", st.ReplicatedSize) + " +::ReplicaSize=" + fmt.Sprintf("%d", st.ReplicaSize)
}
return s
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"net/http"
"time"
jwtgo "github.com/golang-jwt/jwt/v4"
jwtreq "github.com/golang-jwt/jwt/v4/request"
lru "github.com/hashicorp/golang-lru"
"github.com/minio/minio/internal/auth"
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
)
const (
jwtAlgorithm = "Bearer"
// Default JWT token for web handlers is one day.
defaultJWTExpiry = 24 * time.Hour
// Inter-node JWT token expiry is 15 minutes.
defaultInterNodeJWTExpiry = 15 * time.Minute
)
var (
errInvalidAccessKeyID = errors.New("The access key ID you provided does not exist in our records")
errAccessKeyDisabled = errors.New("The access key you provided is disabled")
errAuthentication = errors.New("Authentication failed, check your access credentials")
errNoAuthToken = errors.New("JWT token missing")
)
// cachedAuthenticateNode will cache authenticateNode results for given values up to ttl.
func cachedAuthenticateNode(ttl time.Duration) func(accessKey, secretKey, audience string) (string, error) {
type key struct {
accessKey, secretKey, audience string
}
type value struct {
created time.Time
res string
err error
}
cache, err := lru.NewARC(100)
if err != nil {
logger.LogIf(GlobalContext, err)
return authenticateNode
}
return func(accessKey, secretKey, audience string) (string, error) {
k := key{accessKey: accessKey, secretKey: secretKey, audience: audience}
v, ok := cache.Get(k)
if ok {
if val, ok := v.(*value); ok && time.Since(val.created) < ttl {
return val.res, val.err
}
}
s, err := authenticateNode(accessKey, secretKey, audience)
cache.Add(k, &value{created: time.Now(), res: s, err: err})
return s, err
}
}
func authenticateNode(accessKey, secretKey, audience string) (string, error) {
claims := xjwt.NewStandardClaims()
claims.SetExpiry(UTCNow().Add(defaultInterNodeJWTExpiry))
claims.SetAccessKey(accessKey)
claims.SetAudience(audience)
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
return jwt.SignedString([]byte(secretKey))
}
// Check if the request is authenticated.
// Returns nil if the request is authenticated. errNoAuthToken if token missing.
// Returns errAuthentication for all other errors.
func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, bool, error) {
token, err := jwtreq.AuthorizationHeaderExtractor.ExtractToken(req)
if err != nil {
if err == jwtreq.ErrNoTokenInRequest {
return nil, nil, false, errNoAuthToken
}
return nil, nil, false, err
}
claims := xjwt.NewMapClaims()
if err := xjwt.ParseWithClaims(token, claims, func(claims *xjwt.MapClaims) ([]byte, error) {
if claims.AccessKey != globalActiveCred.AccessKey {
u, ok := globalIAMSys.GetUser(req.Context(), claims.AccessKey)
if !ok {
// Credentials will be invalid but for disabled
// return a different error in such a scenario.
if u.Credentials.Status == auth.AccountOff {
return nil, errAccessKeyDisabled
}
return nil, errInvalidAccessKeyID
}
cred := u.Credentials
return []byte(cred.SecretKey), nil
} // this means claims.AccessKey == rootAccessKey
if !globalAPIConfig.permitRootAccess() {
// if root access is disabled, fail this request.
return nil, errAccessKeyDisabled
}
return []byte(globalActiveCred.SecretKey), nil
}); err != nil {
return claims, nil, false, errAuthentication
}
owner := true
var groups []string
if globalActiveCred.AccessKey != claims.AccessKey {
// Check if the access key is part of users credentials.
u, ok := globalIAMSys.GetUser(req.Context(), claims.AccessKey)
if !ok {
return nil, nil, false, errInvalidAccessKeyID
}
ucred := u.Credentials
// get embedded claims
eclaims, s3Err := checkClaimsFromToken(req, ucred)
if s3Err != ErrNone {
return nil, nil, false, errAuthentication
}
for k, v := range eclaims {
claims.MapClaims[k] = v
}
// if root access is disabled, disable all its service accounts and temporary credentials.
if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() {
return nil, nil, false, errAccessKeyDisabled
}
// Now check if we have a sessionPolicy.
if _, ok = eclaims[iampolicy.SessionPolicyName]; ok {
owner = false
} else {
owner = globalActiveCred.AccessKey == ucred.ParentUser
}
groups = ucred.Groups
}
return claims, groups, owner, nil
}
// newCachedAuthToken returns a token that is cached up to 15 seconds.
// If globalActiveCred is updated it is reflected at once.
func newCachedAuthToken() func(audience string) string {
fn := cachedAuthenticateNode(15 * time.Second)
return func(audience string) string {
cred := globalActiveCred
token, err := fn(cred.AccessKey, cred.SecretKey, audience)
logger.CriticalIf(GlobalContext, err)
return token
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kafka
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/Shopify/sarama"
saramatls "github.com/Shopify/sarama/tools/tls"
"github.com/tidwall/gjson"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/once"
"github.com/minio/minio/internal/store"
xnet "github.com/minio/pkg/net"
)
// the suffix for the configured queue dir where the logs will be persisted.
const kafkaLoggerExtension = ".kafka.log"
// Config - kafka target arguments.
type Config struct {
Enabled bool `json:"enable"`
Brokers []xnet.Host `json:"brokers"`
Topic string `json:"topic"`
Version string `json:"version"`
TLS struct {
Enable bool `json:"enable"`
RootCAs *x509.CertPool `json:"-"`
SkipVerify bool `json:"skipVerify"`
ClientAuth tls.ClientAuthType `json:"clientAuth"`
ClientTLSCert string `json:"clientTLSCert"`
ClientTLSKey string `json:"clientTLSKey"`
} `json:"tls"`
SASL struct {
Enable bool `json:"enable"`
User string `json:"username"`
Password string `json:"password"`
Mechanism string `json:"mechanism"`
} `json:"sasl"`
// Queue store
QueueSize int `json:"queueSize"`
QueueDir string `json:"queueDir"`
// Custom logger
LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"`
}
// pingBrokers checks whether at least one broker in the cluster is reachable
func (k Config) pingBrokers() (err error) {
d := net.Dialer{Timeout: 1 * time.Second}
errs := make([]error, len(k.Brokers))
var wg sync.WaitGroup
for idx, broker := range k.Brokers {
broker := broker
idx := idx
wg.Add(1)
go func(broker xnet.Host, idx int) {
defer wg.Done()
_, errs[idx] = d.Dial("tcp", broker.String())
}(broker, idx)
}
wg.Wait()
var retErr error
for _, err := range errs {
if err == nil {
// if one broker is online it's enough
return nil
}
retErr = err
}
return retErr
}
// Target - Kafka target.
type Target struct {
totalMessages int64
failedMessages int64
wg sync.WaitGroup
// Channel of log entries.
// Reading logCh must hold read lock on logChMu (to avoid read race)
// Sending a value on logCh must hold read lock on logChMu (to avoid closing)
logCh chan interface{}
logChMu sync.RWMutex
// store to persist and replay the logs to the target
// to avoid missing events when the target is down.
store store.Store[interface{}]
storeCtxCancel context.CancelFunc
initKafkaOnce once.Init
initQueueStoreOnce once.Init
producer sarama.SyncProducer
kconfig Config
config *sarama.Config
}
func (h *Target) validate() error {
if len(h.kconfig.Brokers) == 0 {
return errors.New("no broker address found")
}
for _, b := range h.kconfig.Brokers {
if _, err := xnet.ParseHost(b.String()); err != nil {
return err
}
}
return nil
}
// Name returns the name of the target
func (h *Target) Name() string {
return "minio-kafka-audit"
}
// Endpoint - return kafka target
func (h *Target) Endpoint() string {
return "kafka"
}
// String - kafka string
func (h *Target) String() string {
return "kafka"
}
// Stats returns the target statistics.
func (h *Target) Stats() types.TargetStats {
h.logChMu.RLock()
queueLength := len(h.logCh)
h.logChMu.RUnlock()
return types.TargetStats{
TotalMessages: atomic.LoadInt64(&h.totalMessages),
FailedMessages: atomic.LoadInt64(&h.failedMessages),
QueueLength: queueLength,
}
}
// Init initializes the kafka target
func (h *Target) Init(ctx context.Context) error {
if !h.kconfig.Enabled {
return nil
}
if err := h.validate(); err != nil {
return err
}
if h.kconfig.QueueDir != "" {
if err := h.initQueueStoreOnce.DoWithContext(ctx, h.initQueueStore); err != nil {
return err
}
return h.initKafkaOnce.Do(h.init)
}
if err := h.init(); err != nil {
return err
}
go h.startKakfaLogger()
return nil
}
func (h *Target) initQueueStore(ctx context.Context) (err error) {
var queueStore store.Store[interface{}]
queueDir := filepath.Join(h.kconfig.QueueDir, h.Name())
queueStore = store.NewQueueStore[interface{}](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension)
if err = queueStore.Open(); err != nil {
return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err)
}
ctx, cancel := context.WithCancel(ctx)
h.store = queueStore
h.storeCtxCancel = cancel
store.StreamItems(h.store, h, ctx.Done(), h.kconfig.LogOnce)
return
}
func (h *Target) startKakfaLogger() {
h.logChMu.RLock()
logCh := h.logCh
if logCh != nil {
// We are not allowed to add when logCh is nil
h.wg.Add(1)
defer h.wg.Done()
}
h.logChMu.RUnlock()
if logCh == nil {
return
}
// Create a routine which sends json logs received
// from an internal channel.
for entry := range logCh {
h.logEntry(entry)
}
}
func (h *Target) logEntry(entry interface{}) {
atomic.AddInt64(&h.totalMessages, 1)
if err := h.send(entry); err != nil {
atomic.AddInt64(&h.failedMessages, 1)
h.kconfig.LogOnce(context.Background(), err, h.kconfig.Topic)
}
}
func (h *Target) send(entry interface{}) error {
if err := h.initKafkaOnce.Do(h.init); err != nil {
return err
}
logJSON, err := json.Marshal(&entry)
if err != nil {
return err
}
requestID := gjson.GetBytes(logJSON, "requestID").Str
if requestID == "" {
// unsupported data structure
return fmt.Errorf("unsupported data structure: %s must be either audit.Entry or log.Entry", reflect.TypeOf(entry))
}
msg := sarama.ProducerMessage{
Topic: h.kconfig.Topic,
Key: sarama.StringEncoder(requestID),
Value: sarama.ByteEncoder(logJSON),
}
_, _, err = h.producer.SendMessage(&msg)
return err
}
// init initializes the kafka producer using the configured brokers and TLS/SASL settings
func (h *Target) init() error {
if err := h.kconfig.pingBrokers(); err != nil {
return err
}
sconfig := sarama.NewConfig()
if h.kconfig.Version != "" {
kafkaVersion, err := sarama.ParseKafkaVersion(h.kconfig.Version)
if err != nil {
return err
}
sconfig.Version = kafkaVersion
}
sconfig.Net.KeepAlive = 60 * time.Second
sconfig.Net.SASL.User = h.kconfig.SASL.User
sconfig.Net.SASL.Password = h.kconfig.SASL.Password
initScramClient(h.kconfig, sconfig) // initializes configured scram client.
sconfig.Net.SASL.Enable = h.kconfig.SASL.Enable
tlsConfig, err := saramatls.NewConfig(h.kconfig.TLS.ClientTLSCert, h.kconfig.TLS.ClientTLSKey)
if err != nil {
return err
}
sconfig.Net.TLS.Enable = h.kconfig.TLS.Enable
sconfig.Net.TLS.Config = tlsConfig
sconfig.Net.TLS.Config.InsecureSkipVerify = h.kconfig.TLS.SkipVerify
sconfig.Net.TLS.Config.ClientAuth = h.kconfig.TLS.ClientAuth
sconfig.Net.TLS.Config.RootCAs = h.kconfig.TLS.RootCAs
sconfig.Producer.RequiredAcks = sarama.WaitForAll
sconfig.Producer.Retry.Max = 10
sconfig.Producer.Return.Successes = true
h.config = sconfig
var brokers []string
for _, broker := range h.kconfig.Brokers {
brokers = append(brokers, broker.String())
}
producer, err := sarama.NewSyncProducer(brokers, sconfig)
if err != nil {
return err
}
h.producer = producer
return nil
}
// IsOnline returns true if the target is online.
func (h *Target) IsOnline(_ context.Context) bool {
if err := h.initKafkaOnce.Do(h.init); err != nil {
return false
}
return h.kconfig.pingBrokers() == nil
}
// Send log message 'e' to kafka target.
func (h *Target) Send(ctx context.Context, entry interface{}) error {
if h.store != nil {
// save the entry to the queue store which will be replayed to the target.
return h.store.Put(entry)
}
h.logChMu.RLock()
defer h.logChMu.RUnlock()
if h.logCh == nil {
// We are closing...
return nil
}
select {
case h.logCh <- entry:
default:
// log channel is full, do not wait and return
// an error immediately to the caller
atomic.AddInt64(&h.totalMessages, 1)
atomic.AddInt64(&h.failedMessages, 1)
return errors.New("log buffer full")
}
return nil
}
// SendFromStore - reads the log from store and sends it to kafka.
func (h *Target) SendFromStore(key string) (err error) {
auditEntry, err := h.store.Get(key)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
atomic.AddInt64(&h.totalMessages, 1)
err = h.send(auditEntry)
if err != nil {
atomic.AddInt64(&h.failedMessages, 1)
return
}
// Delete the event from store.
return h.store.Del(key)
}
// Cancel - cancels the target
func (h *Target) Cancel() {
// If queuestore is configured, cancel its context to
// stop the replay go-routine.
if h.store != nil {
h.storeCtxCancel()
}
// Close logCh and set it to nil under the write lock.
// In-flight Send operations finish first (they hold the read lock),
// and all future ones are discarded.
h.logChMu.Lock()
close(h.logCh)
h.logCh = nil
h.logChMu.Unlock()
if h.producer != nil {
h.producer.Close()
}
// Wait for messages to be sent...
h.wg.Wait()
}
// New initializes a new logger target which
// sends log messages to the configured Kafka brokers
func New(config Config) *Target {
target := &Target{
logCh: make(chan interface{}, config.QueueSize),
kconfig: config,
}
return target
}
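// A minimal usage sketch (assuming a reachable broker at localhost:9092 and an
// existing "minio-audit" topic; both values are illustrative, not defaults):
//
//	host, _ := xnet.ParseHost("localhost:9092")
//	t := New(Config{
//		Enabled: true,
//		Brokers: []xnet.Host{*host},
//		Topic:   "minio-audit",
//		LogOnce: func(ctx context.Context, err error, id string, _ ...interface{}) {},
//	})
//	if err := t.Init(context.Background()); err != nil {
//		// handle initialization failure
//	}
//	_ = t.Send(context.Background(), map[string]interface{}{"requestID": "example"})
//
// Entries are queued on logCh (or on the queue store when QueueDir is set) and
// delivered to the brokers by the background logger.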
// Type - returns type of the target
func (h *Target) Type() types.TargetType {
return types.TargetKafka
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/logger"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sftpDriver struct {
permissions *ssh.Permissions
endpoint string
}
//msgp:ignore sftpMetrics
type sftpMetrics struct{}
var globalSftpMetrics sftpMetrics
func sftpTrace(s *sftp.Request, startTime time.Time, source string, user string, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
return madmin.TraceInfo{
TraceType: madmin.TraceFTP,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: fmt.Sprintf("sftp USER=%s COMMAND=%s PARAM=%s, Source=%s", user, s.Method, s.Filepath, source),
Duration: time.Since(startTime),
Path: s.Filepath,
Error: errStr,
}
}
func (m *sftpMetrics) log(s *sftp.Request, user string) func(err error) {
startTime := time.Now()
source := getSource(2)
return func(err error) {
globalTrace.Publish(sftpTrace(s, startTime, source, user, err))
}
}
// NewSFTPDriver initializes sftp.Handlers implementation of following interfaces
//
// - sftp.Fileread
// - sftp.Filewrite
// - sftp.Filelist
// - sftp.Filecmd
func NewSFTPDriver(perms *ssh.Permissions) sftp.Handlers {
handler := &sftpDriver{endpoint: fmt.Sprintf("127.0.0.1:%s", globalMinioPort), permissions: perms}
return sftp.Handlers{
FileGet: handler,
FilePut: handler,
FileCmd: handler,
FileList: handler,
}
}
func (f *sftpDriver) getMinIOClient() (*minio.Client, error) {
ui, ok := globalIAMSys.GetUser(context.Background(), f.AccessKey())
if !ok && !globalIAMSys.LDAPConfig.Enabled() {
return nil, errNoSuchUser
}
if !ok && globalIAMSys.LDAPConfig.Enabled() {
targetUser, targetGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(f.AccessKey())
if err != nil {
return nil, err
}
ldapPolicies, _ := globalIAMSys.PolicyDBGet(targetUser, false, targetGroups...)
if len(ldapPolicies) == 0 {
return nil, errAuthentication
}
expiryDur, err := globalIAMSys.LDAPConfig.GetExpiryDuration("")
if err != nil {
return nil, err
}
claims := make(map[string]interface{})
claims[expClaim] = UTCNow().Add(expiryDur).Unix()
claims[ldapUser] = targetUser
claims[ldapUserN] = f.AccessKey()
cred, err := auth.GetNewCredentialsWithMetadata(claims, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
// Set the parent of the temporary access key, this is useful
// in obtaining service accounts by this cred.
cred.ParentUser = targetUser
// Set this value to LDAP groups; an LDAP user can be part
// of a large number of groups.
cred.Groups = targetGroups
// Set the newly generated credentials, policyName is empty on purpose
// LDAP policies are applied automatically using their ldapUser, ldapGroups
// mapping.
updatedAt, err := globalIAMSys.SetTempUser(context.Background(), cred.AccessKey, cred, "")
if err != nil {
return nil, err
}
// Call hook for site replication.
logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey,
SecretKey: cred.SecretKey,
SessionToken: cred.SessionToken,
ParentUser: cred.ParentUser,
},
UpdatedAt: updatedAt,
}))
return minio.New(f.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: globalIsTLS,
Transport: globalRemoteTargetTransport,
})
}
// ok == true - at this point
if ui.Credentials.IsTemp() {
// Temporary credentials are not allowed.
return nil, errAuthentication
}
return minio.New(f.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""),
Secure: globalIsTLS,
Transport: globalRemoteTargetTransport,
})
}
func (f *sftpDriver) AccessKey() string {
return f.permissions.CriticalOptions["accessKey"]
}
func (f *sftpDriver) Fileread(r *sftp.Request) (ra io.ReaderAt, err error) {
stopFn := globalSftpMetrics.log(r, f.AccessKey())
defer stopFn(err)
flags := r.Pflags()
if !flags.Read {
// sanity check
return nil, os.ErrInvalid
}
bucket, object := path2BucketObject(r.Filepath)
if bucket == "" {
return nil, errors.New("bucket name cannot be empty")
}
clnt, err := f.getMinIOClient()
if err != nil {
return nil, err
}
obj, err := clnt.GetObject(context.Background(), bucket, object, minio.GetObjectOptions{})
if err != nil {
return nil, err
}
_, err = obj.Stat()
if err != nil {
return nil, err
}
return obj, nil
}
type writerAt struct {
w *io.PipeWriter
wg *sync.WaitGroup
}
func (w *writerAt) Close() error {
err := w.w.Close()
w.wg.Wait()
return err
}
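// WriteAt ignores the supplied offset and appends to the underlying pipe; the
// bytes are streamed sequentially into PutObject on the reader side of the pipe.
// This relies on the SFTP client issuing in-order, contiguous writes, which is
// how typical sftp uploads behave.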
func (w *writerAt) WriteAt(b []byte, offset int64) (n int, err error) {
return w.w.Write(b)
}
func (f *sftpDriver) Filewrite(r *sftp.Request) (w io.WriterAt, err error) {
stopFn := globalSftpMetrics.log(r, f.AccessKey())
defer stopFn(err)
flags := r.Pflags()
if !flags.Write {
// sanity check
return nil, os.ErrInvalid
}
bucket, object := path2BucketObject(r.Filepath)
if bucket == "" {
return nil, errors.New("bucket name cannot be empty")
}
clnt, err := f.getMinIOClient()
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
wa := &writerAt{w: pw, wg: &sync.WaitGroup{}}
wa.wg.Add(1)
go func() {
_, err := clnt.PutObject(r.Context(), bucket, object, pr, -1, minio.PutObjectOptions{SendContentMd5: true})
pr.CloseWithError(err)
wa.wg.Done()
}()
return wa, nil
}
func (f *sftpDriver) Filecmd(r *sftp.Request) (err error) {
stopFn := globalSftpMetrics.log(r, f.AccessKey())
defer stopFn(err)
clnt, err := f.getMinIOClient()
if err != nil {
return err
}
switch r.Method {
case "Setstat", "Rename", "Link", "Symlink":
return NotImplemented{}
case "Rmdir":
bucket, prefix := path2BucketObject(r.Filepath)
if bucket == "" {
return errors.New("deleting all buckets not allowed")
}
cctx, cancel := context.WithCancel(context.Background())
defer cancel()
objectsCh := make(chan minio.ObjectInfo)
// Send object names that are needed to be removed to objectsCh
go func() {
defer close(objectsCh)
opts := minio.ListObjectsOptions{Prefix: prefix, Recursive: true}
for object := range clnt.ListObjects(cctx, bucket, opts) {
if object.Err != nil {
return
}
objectsCh <- object
}
}()
// Call RemoveObjects API
for err := range clnt.RemoveObjects(context.Background(), bucket, objectsCh, minio.RemoveObjectsOptions{}) {
if err.Err != nil {
return err.Err
}
}
case "Remove":
bucket, object := path2BucketObject(r.Filepath)
if bucket == "" {
return errors.New("bucket name cannot be empty")
}
return clnt.RemoveObject(context.Background(), bucket, object, minio.RemoveObjectOptions{})
case "Mkdir":
bucket, prefix := path2BucketObject(r.Filepath)
if bucket == "" {
return errors.New("bucket name cannot be empty")
}
dirPath := buildMinioDir(prefix)
_, err = clnt.PutObject(context.Background(), bucket, dirPath, bytes.NewReader([]byte("")), 0,
// Always send Content-MD5 to succeed with bucket with
// locking enabled. There is no performance hit since
// this is always an empty object
minio.PutObjectOptions{SendContentMd5: true},
)
return err
}
return NotImplemented{}
}
type listerAt []os.FileInfo
// Modeled after strings.Reader's ReadAt() implementation
func (f listerAt) ListAt(ls []os.FileInfo, offset int64) (int, error) {
var n int
if offset >= int64(len(f)) {
return 0, io.EOF
}
n = copy(ls, f[offset:])
if n < len(ls) {
return n, io.EOF
}
return n, nil
}
func (f *sftpDriver) Filelist(r *sftp.Request) (la sftp.ListerAt, err error) {
stopFn := globalSftpMetrics.log(r, f.AccessKey())
defer stopFn(err)
clnt, err := f.getMinIOClient()
if err != nil {
return nil, err
}
switch r.Method {
case "List":
var files []os.FileInfo
bucket, prefix := path2BucketObject(r.Filepath)
if bucket == "" {
buckets, err := clnt.ListBuckets(r.Context())
if err != nil {
return nil, err
}
for _, bucket := range buckets {
files = append(files, &minioFileInfo{
p: bucket.Name,
info: minio.ObjectInfo{Key: bucket.Name, LastModified: bucket.CreationDate},
isDir: true,
})
}
return listerAt(files), nil
}
prefix = retainSlash(prefix)
for object := range clnt.ListObjects(r.Context(), bucket, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: false,
}) {
if object.Err != nil {
return nil, object.Err
}
if object.Key == prefix {
continue
}
isDir := strings.HasSuffix(object.Key, SlashSeparator)
files = append(files, &minioFileInfo{
p: pathClean(strings.TrimPrefix(object.Key, prefix)),
info: object,
isDir: isDir,
})
}
return listerAt(files), nil
case "Stat":
if r.Filepath == SlashSeparator {
return listerAt{&minioFileInfo{
p: r.Filepath,
isDir: true,
}}, nil
}
bucket, object := path2BucketObject(r.Filepath)
if bucket == "" {
return nil, errors.New("bucket name cannot be empty")
}
if object == "" {
ok, err := clnt.BucketExists(context.Background(), bucket)
if err != nil {
return nil, err
}
if !ok {
return nil, os.ErrNotExist
}
return listerAt{&minioFileInfo{
p: pathClean(bucket),
info: minio.ObjectInfo{Key: bucket},
isDir: true,
}}, nil
}
objInfo, err := clnt.StatObject(context.Background(), bucket, object, minio.StatObjectOptions{})
if err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchKey" {
// dummy return to satisfy LIST (stat -> list) behavior.
return listerAt{&minioFileInfo{
p: pathClean(object),
info: minio.ObjectInfo{Key: object},
isDir: true,
}}, nil
}
return nil, err
}
isDir := strings.HasSuffix(objInfo.Key, SlashSeparator)
return listerAt{&minioFileInfo{
p: pathClean(object),
info: objInfo,
isDir: isDir,
}}, nil
}
return nil, NotImplemented{}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package store
import (
"context"
"errors"
"fmt"
"strings"
"time"
xnet "github.com/minio/pkg/net"
)
const (
retryInterval = 3 * time.Second
)
type logger = func(ctx context.Context, err error, id string, errKind ...interface{})
// ErrNotConnected - indicates that the target connection is not active.
var ErrNotConnected = errors.New("not connected to target server/service")
// Target - store target interface
type Target interface {
Name() string
SendFromStore(key string) error
}
// Store - Used to persist items.
type Store[I any] interface {
Put(item I) error
Get(key string) (I, error)
Len() int
List() ([]string, error)
Del(key string) error
Open() error
Extension() string
}
// replayItems - periodically lists the store and streams the item keys on the returned channel.
func replayItems[I any](store Store[I], doneCh <-chan struct{}, log logger, id string) <-chan string {
itemKeyCh := make(chan string)
go func() {
defer close(itemKeyCh)
retryTicker := time.NewTicker(retryInterval)
defer retryTicker.Stop()
for {
names, err := store.List()
if err != nil {
log(context.Background(), fmt.Errorf("store.List() failed with: %w", err), id)
} else {
for _, name := range names {
select {
case itemKeyCh <- strings.TrimSuffix(name, store.Extension()):
// Get next key.
case <-doneCh:
return
}
}
}
select {
case <-retryTicker.C:
case <-doneCh:
return
}
}
}()
return itemKeyCh
}
// sendItems - receives item keys and sends the corresponding stored items to the target, retrying until success or shutdown.
func sendItems(target Target, itemKeyCh <-chan string, doneCh <-chan struct{}, logger logger) {
retryTicker := time.NewTicker(retryInterval)
defer retryTicker.Stop()
send := func(itemKey string) bool {
for {
err := target.SendFromStore(itemKey)
if err == nil {
break
}
if err != ErrNotConnected && !xnet.IsConnResetErr(err) {
logger(context.Background(),
fmt.Errorf("target.SendFromStore() failed with '%w'", err),
target.Name())
}
// Retrying after 3secs back-off
select {
case <-retryTicker.C:
case <-doneCh:
return false
}
}
return true
}
for {
select {
case itemKey, ok := <-itemKeyCh:
if !ok {
// closed channel.
return
}
if !send(itemKey) {
return
}
case <-doneCh:
return
}
}
}
// StreamItems reads the keys from the store and replays the corresponding item to the target.
func StreamItems[I any](store Store[I], target Target, doneCh <-chan struct{}, logger logger) {
go func() {
// Replays the items from the store.
itemKeyCh := replayItems(store, doneCh, logger, target.Name())
// Send items from the store.
sendItems(target, itemKeyCh, doneCh, logger)
}()
}
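// A minimal wiring sketch, given a Target implementation `target` (the directory,
// limit and extension below are illustrative; targets in this repository construct
// the queue store the same way):
//
//	queue := NewQueueStore[interface{}]("/tmp/minio-events", 10000, ".event")
//	if err := queue.Open(); err != nil {
//		// handle error
//	}
//	doneCh := make(chan struct{})
//	StreamItems[interface{}](queue, target, doneCh, func(ctx context.Context, err error, id string, _ ...interface{}) {
//		// log replay errors
//	})
//	// Producers call queue.Put(item); keys are replayed to target.SendFromStore.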
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package amztime
import (
"testing"
"time"
)
func TestISO8601Format(t *testing.T) {
testCases := []struct {
date time.Time
expectedOutput string
}{
{
date: time.Date(2009, time.November, 13, 4, 51, 1, 940303531, time.UTC),
expectedOutput: "2009-11-13T04:51:01.940Z",
},
{
date: time.Date(2009, time.November, 13, 4, 51, 1, 901303531, time.UTC),
expectedOutput: "2009-11-13T04:51:01.901Z",
},
{
date: time.Date(2009, time.November, 13, 4, 51, 1, 900303531, time.UTC),
expectedOutput: "2009-11-13T04:51:01.900Z",
},
{
date: time.Date(2009, time.November, 13, 4, 51, 1, 941303531, time.UTC),
expectedOutput: "2009-11-13T04:51:01.941Z",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.expectedOutput, func(t *testing.T) {
gotOutput := ISO8601Format(testCase.date)
t.Log("Go", testCase.date.Format(iso8601TimeFormat))
if gotOutput != testCase.expectedOutput {
t.Errorf("Expected %s, got %s", testCase.expectedOutput, gotOutput)
}
})
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lambda
import (
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/event/target"
)
// Help template inputs for all lambda targets
var (
HelpWebhook = config.HelpKVS{
config.HelpKV{
Key: target.WebhookEndpoint,
Description: "webhook server endpoint e.g. http://localhost:8080/minio/lambda",
Type: "url",
Sensitive: true,
},
config.HelpKV{
Key: target.WebhookAuthToken,
Description: "opaque string or JWT authorization token",
Optional: true,
Type: "string",
Sensitive: true,
Secret: true,
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
config.HelpKV{
Key: target.WebhookClientCert,
Description: "client cert for Webhook mTLS auth",
Optional: true,
Type: "string",
Sensitive: true,
},
config.HelpKV{
Key: target.WebhookClientKey,
Description: "client cert key for Webhook mTLS auth",
Optional: true,
Type: "string",
Sensitive: true,
},
}
)
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package openid
import (
"crypto/sha1"
"encoding/base64"
"errors"
"io"
"net/http"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/arn"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/identity/openid/provider"
"github.com/minio/minio/internal/hash/sha256"
iampolicy "github.com/minio/pkg/iam/policy"
xnet "github.com/minio/pkg/net"
)
// OpenID keys and envs.
const (
ClientID = "client_id"
ClientSecret = "client_secret"
ConfigURL = "config_url"
ClaimName = "claim_name"
ClaimUserinfo = "claim_userinfo"
RolePolicy = "role_policy"
DisplayName = "display_name"
Scopes = "scopes"
RedirectURI = "redirect_uri"
RedirectURIDynamic = "redirect_uri_dynamic"
Vendor = "vendor"
// Vendor specific ENV only enabled if the Vendor matches == "vendor"
KeyCloakRealm = "keycloak_realm"
KeyCloakAdminURL = "keycloak_admin_url"
// Removed params
JwksURL = "jwks_url"
ClaimPrefix = "claim_prefix"
)
// DefaultKVS - default config for OpenID config
var (
DefaultKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: "",
},
config.KV{
Key: DisplayName,
Value: "",
},
config.KV{
Key: ConfigURL,
Value: "",
},
config.KV{
Key: ClientID,
Value: "",
},
config.KV{
Key: ClientSecret,
Value: "",
},
config.KV{
Key: ClaimName,
Value: iampolicy.PolicyName,
},
config.KV{
Key: ClaimUserinfo,
Value: "",
},
config.KV{
Key: RolePolicy,
Value: "",
},
config.KV{
Key: ClaimPrefix,
Value: "",
},
config.KV{
Key: RedirectURI,
Value: "",
},
config.KV{
Key: RedirectURIDynamic,
Value: "off",
},
config.KV{
Key: Scopes,
Value: "",
},
config.KV{
Key: Vendor,
Value: "",
},
config.KV{
Key: KeyCloakRealm,
Value: "",
},
config.KV{
Key: KeyCloakAdminURL,
Value: "",
},
}
)
var errSingleProvider = config.Errorf("Only one OpenID provider can be configured if not using role policy mapping")
// DummyRoleARN is used to indicate that the user associated with it was
// authenticated via policy-claim based OpenID provider.
var DummyRoleARN = func() arn.ARN {
v, err := arn.NewIAMRoleARN("dummy-internal", "")
if err != nil {
panic("should not happen!")
}
return v
}()
// Config - OpenID Config
type Config struct {
Enabled bool
// map of roleARN to providerCfg's
arnProviderCfgsMap map[arn.ARN]*providerCfg
// map of config names to providerCfg's
ProviderCfgs map[string]*providerCfg
pubKeys publicKeys
roleArnPolicyMap map[arn.ARN]string
transport http.RoundTripper
closeRespFn func(io.ReadCloser)
}
// Clone returns a cloned copy of OpenID config.
func (r *Config) Clone() Config {
if r == nil {
return Config{}
}
cfg := Config{
Enabled: r.Enabled,
arnProviderCfgsMap: make(map[arn.ARN]*providerCfg, len(r.arnProviderCfgsMap)),
ProviderCfgs: make(map[string]*providerCfg, len(r.ProviderCfgs)),
pubKeys: r.pubKeys,
roleArnPolicyMap: make(map[arn.ARN]string, len(r.roleArnPolicyMap)),
transport: r.transport,
closeRespFn: r.closeRespFn,
}
for k, v := range r.arnProviderCfgsMap {
cfg.arnProviderCfgsMap[k] = v
}
for k, v := range r.ProviderCfgs {
cfg.ProviderCfgs[k] = v
}
for k, v := range r.roleArnPolicyMap {
cfg.roleArnPolicyMap[k] = v
}
return cfg
}
// LookupConfig lookup jwks from config, override with any ENVs.
func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func(io.ReadCloser), serverRegion string) (c Config, err error) {
openIDClientTransport := http.DefaultTransport
if transport != nil {
openIDClientTransport = transport
}
c = Config{
Enabled: false,
arnProviderCfgsMap: map[arn.ARN]*providerCfg{},
ProviderCfgs: map[string]*providerCfg{},
pubKeys: publicKeys{
RWMutex: &sync.RWMutex{},
pkMap: map[string]interface{}{},
},
roleArnPolicyMap: map[arn.ARN]string{},
transport: openIDClientTransport,
closeRespFn: closeRespFn,
}
seenClientIDs := set.NewStringSet()
deprecatedKeys := []string{JwksURL}
// Remove deprecated keys from the config since support for them has been dropped.
for k := range s[config.IdentityOpenIDSubSys] {
for _, dk := range deprecatedKeys {
kvs := s[config.IdentityOpenIDSubSys][k]
kvs.Delete(dk)
s[config.IdentityOpenIDSubSys][k] = kvs
}
}
if err := s.CheckValidKeys(config.IdentityOpenIDSubSys, deprecatedKeys); err != nil {
return c, err
}
openIDTargets, err := s.GetAvailableTargets(config.IdentityOpenIDSubSys)
if err != nil {
return c, err
}
for _, cfgName := range openIDTargets {
getCfgVal := func(cfgParam string) string {
// As parameters are already validated, we skip checking
// if the config param was found.
val, _, _ := s.ResolveConfigParam(config.IdentityOpenIDSubSys, cfgName, cfgParam, false)
return val
}
// In the past, when only one openID provider was allowed, there
// was no `enable` parameter - the configuration is turned off
// by clearing the values. With multiple providers, we support
// individually enabling/disabling provider configurations. If
// the enable parameter's value is non-empty, we use that
// setting, otherwise we treat it as enabled if some important
// parameters are non-empty.
var (
cfgEnableVal = getCfgVal(config.Enable)
isExplicitlyEnabled = false
)
if cfgEnableVal != "" {
isExplicitlyEnabled = true
}
var enabled bool
if isExplicitlyEnabled {
enabled, err = config.ParseBool(cfgEnableVal)
if err != nil {
return c, err
}
// No need to continue loading if the config is not enabled.
if !enabled {
continue
}
}
p := newProviderCfgFromConfig(getCfgVal)
configURL := getCfgVal(ConfigURL)
if !isExplicitlyEnabled {
enabled = true
if p.ClientID == "" && p.ClientSecret == "" && configURL == "" {
enabled = false
}
}
// No need to continue loading if the config is not enabled.
if !enabled {
continue
}
// Validate that the same client ID is not specified across multiple configurations.
if seenClientIDs.Contains(p.ClientID) {
return c, config.Errorf("Client ID %s is present with multiple OpenID configurations", p.ClientID)
}
seenClientIDs.Add(p.ClientID)
p.URL, err = xnet.ParseHTTPURL(configURL)
if err != nil {
return c, err
}
configURLDomain := p.URL.Hostname()
p.DiscoveryDoc, err = parseDiscoveryDoc(p.URL, transport, closeRespFn)
if err != nil {
return c, err
}
if p.ClaimUserinfo && configURL == "" {
return c, errors.New("please specify config_url to enable fetching claims from UserInfo endpoint")
}
if scopeList := getCfgVal(Scopes); scopeList != "" {
var scopes []string
for _, scope := range strings.Split(scopeList, ",") {
scope = strings.TrimSpace(scope)
if scope == "" {
return c, config.Errorf("empty scope value is not allowed '%s', please refer to our documentation", scopeList)
}
scopes = append(scopes, scope)
}
// Replace the discovery document scopes by client customized scopes.
p.DiscoveryDoc.ScopesSupported = scopes
}
// Check if claim name is the non-default value and role policy is set.
if p.ClaimName != iampolicy.PolicyName && p.RolePolicy != "" {
// In the unlikely event that the user specifies
// `iampolicy.PolicyName` as the claim name explicitly and sets
// a role policy, this check is thwarted, but we will be using
// the role policy anyway.
return c, config.Errorf("Role Policy (=`%s`) and Claim Name (=`%s`) cannot both be set", p.RolePolicy, p.ClaimName)
}
jwksURL := p.DiscoveryDoc.JwksURI
if jwksURL == "" {
return c, config.Errorf("no JWKS URI found in your provider's discovery doc (config_url=%s)", configURL)
}
p.JWKS.URL, err = xnet.ParseHTTPURL(jwksURL)
if err != nil {
return c, err
}
if p.RolePolicy != "" {
// RolePolicy is validated by IAM System during its
// initialization.
// Generate role ARN as combination of provider domain and
// prefix of client ID.
domain := configURLDomain
if domain == "" {
// Attempt to parse the JWKs URI.
domain = p.JWKS.URL.Hostname()
if domain == "" {
return c, config.Errorf("unable to parse a domain from the OpenID config")
}
}
if p.ClientID == "" {
return c, config.Errorf("client ID must not be empty")
}
// We set the resource ID of the role arn as a hash of client
// ID, so we can get a short roleARN that stays the same on
// restart.
var resourceID string
{
h := sha1.New()
h.Write([]byte(p.ClientID))
bs := h.Sum(nil)
resourceID = base64.RawURLEncoding.EncodeToString(bs)
}
p.roleArn, err = arn.NewIAMRoleARN(resourceID, serverRegion)
if err != nil {
return c, config.Errorf("unable to generate ARN from the OpenID config: %v", err)
}
c.roleArnPolicyMap[p.roleArn] = p.RolePolicy
} else if p.ClaimName == "" {
return c, config.Errorf("A role policy or claim name must be specified")
}
if err = p.initializeProvider(getCfgVal, c.transport); err != nil {
return c, err
}
arnKey := p.roleArn
if p.RolePolicy == "" {
arnKey = DummyRoleARN
// Ensure that at most one JWT policy claim based provider may be
// defined.
if _, ok := c.arnProviderCfgsMap[DummyRoleARN]; ok {
return c, errSingleProvider
}
}
c.arnProviderCfgsMap[arnKey] = &p
c.ProviderCfgs[cfgName] = &p
if err = c.PopulatePublicKey(arnKey); err != nil {
return c, err
}
}
c.Enabled = true
return c, nil
}
// ErrProviderConfigNotFound - represents a non-existing provider error.
var ErrProviderConfigNotFound = errors.New("provider configuration not found")
// GetConfigInfo - returns configuration and related info for the given IDP
// provider.
func (r *Config) GetConfigInfo(s config.Config, cfgName string) ([]madmin.IDPCfgInfo, error) {
openIDConfigs, err := s.GetAvailableTargets(config.IdentityOpenIDSubSys)
if err != nil {
return nil, err
}
present := false
for _, cfg := range openIDConfigs {
if cfg == cfgName {
present = true
break
}
}
if !present {
return nil, ErrProviderConfigNotFound
}
kvsrcs, err := s.GetResolvedConfigParams(config.IdentityOpenIDSubSys, cfgName, true)
if err != nil {
return nil, err
}
res := make([]madmin.IDPCfgInfo, 0, len(kvsrcs)+1)
for _, kvsrc := range kvsrcs {
// skip default values.
if kvsrc.Src == config.ValueSourceDef {
if kvsrc.Key != madmin.EnableKey {
continue
}
// set an explicit on/off from live configuration.
kvsrc.Value = "off"
if _, ok := r.ProviderCfgs[cfgName]; ok {
if r.Enabled {
kvsrc.Value = "on"
}
}
}
res = append(res, madmin.IDPCfgInfo{
Key: kvsrc.Key,
Value: kvsrc.Value,
IsCfg: true,
IsEnv: kvsrc.Src == config.ValueSourceEnv,
})
}
if provCfg, exists := r.ProviderCfgs[cfgName]; exists && provCfg.RolePolicy != "" {
// Append roleARN
res = append(res, madmin.IDPCfgInfo{
Key: "roleARN",
Value: provCfg.roleArn.String(),
IsCfg: false,
})
}
// sort the structs by the key
sort.Slice(res, func(i, j int) bool {
return res[i].Key < res[j].Key
})
return res, nil
}
// GetConfigList - list openID configurations
func (r *Config) GetConfigList(s config.Config) ([]madmin.IDPListItem, error) {
openIDConfigs, err := s.GetAvailableTargets(config.IdentityOpenIDSubSys)
if err != nil {
return nil, err
}
var res []madmin.IDPListItem
for _, cfg := range openIDConfigs {
pcfg, ok := r.ProviderCfgs[cfg]
if !ok {
res = append(res, madmin.IDPListItem{
Type: "openid",
Name: cfg,
Enabled: false,
})
} else {
var roleARN string
if pcfg.RolePolicy != "" {
roleARN = pcfg.roleArn.String()
}
res = append(res, madmin.IDPListItem{
Type: "openid",
Name: cfg,
Enabled: r.Enabled,
RoleARN: roleARN,
})
}
}
return res, nil
}
// Enabled returns whether the OpenID config URL is set.
func Enabled(kvs config.KVS) bool {
return kvs.Get(ConfigURL) != ""
}
// GetSettings - fetches OIDC settings for site-replication related validation.
// NOTE that region must be populated by the caller as this package does not know it.
func (r *Config) GetSettings() madmin.OpenIDSettings {
res := madmin.OpenIDSettings{}
if !r.Enabled {
return res
}
h := sha256.New()
for arn, provCfg := range r.arnProviderCfgsMap {
hashedSecret := ""
{
h.Reset()
h.Write([]byte(provCfg.ClientSecret))
hashedSecret = base64.RawURLEncoding.EncodeToString(h.Sum(nil))
}
if arn != DummyRoleARN {
if res.Roles == nil {
res.Roles = make(map[string]madmin.OpenIDProviderSettings)
}
res.Roles[arn.String()] = madmin.OpenIDProviderSettings{
ClaimUserinfoEnabled: provCfg.ClaimUserinfo,
RolePolicy: provCfg.RolePolicy,
ClientID: provCfg.ClientID,
HashedClientSecret: hashedSecret,
}
} else {
res.ClaimProvider = madmin.OpenIDProviderSettings{
ClaimUserinfoEnabled: provCfg.ClaimUserinfo,
RolePolicy: provCfg.RolePolicy,
ClientID: provCfg.ClientID,
HashedClientSecret: hashedSecret,
}
}
}
return res
}
// GetIAMPolicyClaimName - returns the policy claim name for the (at most one)
// provider configured without a role policy.
func (r *Config) GetIAMPolicyClaimName() string {
pCfg, ok := r.arnProviderCfgsMap[DummyRoleARN]
if !ok {
return ""
}
return pCfg.ClaimPrefix + pCfg.ClaimName
}
// LookupUser looks up the given userid with the provider.
func (r Config) LookupUser(roleArn, userid string) (provider.User, error) {
// Can safely ignore error here as empty or invalid ARNs will not be
// mapped.
arnVal, _ := arn.Parse(roleArn)
pCfg, ok := r.arnProviderCfgsMap[arnVal]
if ok {
user, err := pCfg.provider.LookupUser(userid)
if err != nil && err != provider.ErrAccessTokenExpired {
return user, err
}
if err == provider.ErrAccessTokenExpired {
if err = pCfg.provider.LoginWithClientID(pCfg.ClientID, pCfg.ClientSecret); err != nil {
return user, err
}
user, err = pCfg.provider.LookupUser(userid)
}
return user, err
}
// Without any specific logic for a provider, all accounts
// are always enabled.
return provider.User{ID: userid, Enabled: true}, nil
}
// ProviderEnabled returns true if any vendor specific provider is enabled.
func (r Config) ProviderEnabled() bool {
if !r.Enabled {
return false
}
for _, v := range r.arnProviderCfgsMap {
if v.provider != nil {
return true
}
}
return false
}
// GetRoleInfo - returns ARN to policies map if a role policy based openID
// provider is configured. Otherwise returns nil.
func (r Config) GetRoleInfo() map[arn.ARN]string {
for _, p := range r.arnProviderCfgsMap {
if p.RolePolicy != "" {
return r.roleArnPolicyMap
}
}
return nil
}
// GetDefaultExpiration - returns the expiration seconds expected.
func GetDefaultExpiration(dsecs string) (time.Duration, error) {
defaultExpiryDuration := time.Duration(60) * time.Minute // Defaults to 1hr.
if dsecs != "" {
expirySecs, err := strconv.ParseInt(dsecs, 10, 64)
if err != nil {
return 0, auth.ErrInvalidDuration
}
// The duration, in seconds, of the role session.
// The value can range from 900 seconds (15 minutes)
// up to 365 days.
if expirySecs < 900 || expirySecs > 31536000 {
return 0, auth.ErrInvalidDuration
}
defaultExpiryDuration = time.Duration(expirySecs) * time.Second
}
return defaultExpiryDuration, nil
}
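// For illustration: GetDefaultExpiration("3600") yields one hour,
// GetDefaultExpiration("") falls back to the one hour default, and
// GetDefaultExpiration("100") fails with auth.ErrInvalidDuration because it is
// below the 900 second minimum.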
<file_sep># Bucket Quota Configuration Quickstart Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)

Buckets can be configured with a `Hard` quota - writes to the bucket are disallowed once the configured quota limit is reached.
## Prerequisites
- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#procedure).
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
## Set bucket quota configuration
### Set a hard quota of 1GB for a bucket `mybucket` on MinIO object storage
```sh
mc admin bucket quota myminio/mybucket --hard 1gb
```
### Verify the quota configured on `mybucket` on MinIO
```sh
mc admin bucket quota myminio/mybucket
```
### Clear bucket quota configuration for `mybucket` on MinIO
```sh
mc admin bucket quota myminio/mybucket --clear
```
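### Set bucket quota programmatically

The same hard quota can also be applied through the admin API. Below is a minimal, illustrative Go sketch using the `madmin-go` admin client; the endpoint, credentials and bucket name are placeholders, and the exact `BucketQuota` field and constant names should be verified against the `madmin-go` version in use.

```go
package main

import (
	"context"
	"log"

	"github.com/minio/madmin-go/v3"
)

func main() {
	// Placeholder endpoint and credentials - replace with your own.
	madmClnt, err := madmin.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Apply a 1 GiB hard quota on "mybucket". BucketQuota/HardQuota are
	// assumed to match the madmin-go API; consult its docs to confirm.
	quota := &madmin.BucketQuota{Quota: 1 << 30, Type: madmin.HardQuota}
	if err := madmClnt.SetBucketQuota(context.Background(), "mybucket", quota); err != nil {
		log.Fatalln(err)
	}
}
```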
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/minio/pkg/ellipses"
)
type xl struct {
This string `json:"this"`
Sets [][]string `json:"sets"`
}
type format struct {
ID string `json:"id"`
XL xl `json:"xl"`
}
func getMountMap() (map[string]string, error) {
result := make(map[string]string)
mountInfo, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer mountInfo.Close()
scanner := bufio.NewScanner(mountInfo)
for scanner.Scan() {
s := strings.Split(scanner.Text(), " ")
if len(s) != 11 {
return nil, errors.New("unsupport /proc/self/mountinfo format")
}
result[s[2]] = s[9]
}
if err := scanner.Err(); err != nil {
return nil, err
}
return result, nil
}
func getDiskUUIDMap() (map[string]string, error) {
result := make(map[string]string)
err := filepath.Walk("/dev/disk/by-uuid/",
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
realPath, err := filepath.EvalSymlinks(path)
if err != nil {
return err
}
result[realPath] = strings.TrimPrefix(path, "/dev/disk/by-uuid/")
return nil
})
if err != nil {
return nil, err
}
return result, nil
}
type localDisk struct {
index int
path string
}
func getMajorMinor(path string) (string, error) {
var stat syscall.Stat_t
if err := syscall.Stat(path, &stat); err != nil {
return "", fmt.Errorf("unable to stat `%s`: %w", err)
}
major := (stat.Dev & 0x00000000000fff00) >> 8
major |= (stat.Dev & 0xfffff00000000000) >> 32
minor := (stat.Dev & 0x00000000000000ff) >> 0
minor |= (stat.Dev & 0x00000ffffff00000) >> 12
return fmt.Sprintf("%d:%d", major, minor), nil
}
func filterLocalDisks(node, args string) ([]localDisk, error) {
var result []localDisk
argP, err := ellipses.FindEllipsesPatterns(args)
if err != nil {
return nil, err
}
exp := argP.Expand()
if node == "" {
for index, e := range exp {
result = append(result, localDisk{index: index, path: strings.Join(e, "")})
}
} else {
for index, e := range exp {
u, err := url.Parse(strings.Join(e, ""))
if err != nil {
return nil, err
}
if strings.Contains(u.Host, node) {
result = append(result, localDisk{index: index, path: u.Path})
}
}
}
return result, nil
}
func getFormatJSON(path string) (format, error) {
formatJSON, err := ioutil.ReadFile(filepath.Join(path, ".minio.sys/format.json"))
if err != nil {
return format{}, err
}
var f format
err = json.Unmarshal(formatJSON, &f)
if err != nil {
return format{}, err
}
return f, nil
}
func getDiskLocation(f format) (string, error) {
for i, set := range f.XL.Sets {
for j, disk := range set {
if disk == f.XL.This {
return fmt.Sprintf("%d-%d", i, j), nil
}
}
}
return "", errors.New("format.json is corrupted")
}
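// main resolves which of the MinIO server's drive arguments belong to the
// local node and prints fstab-style entries mapping each drive's UUID to its
// expected mount path. Illustrative invocation (host names and paths below are
// hypothetical placeholders):
//
//	go run main.go -local-node-name node1.example.com \
//	    -args "http://node{1...4}.example.com/mnt/drive{1...4}/minio"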
func main() {
var node, args string
flag.StringVar(&node, "local-node-name", "", "the name of the local node")
flag.StringVar(&args, "args", "", "arguments passed to MinIO server")
flag.Parse()
localDisks, err := filterLocalDisks(node, args)
if err != nil {
log.Fatal(err)
}
if len(localDisks) == 0 {
log.Fatal("Fix --local-node-name or/and --args to select local disks.")
}
format, err := getFormatJSON(localDisks[0].path)
if err != nil {
log.Fatal(err)
}
setSize := len(format.XL.Sets[0])
expectedDisksName := make(map[string]string)
actualDisksName := make(map[string]string)
// Calculate the set/disk index
for _, disk := range localDisks {
expectedDisksName[fmt.Sprintf("%d-%d", disk.index/setSize, disk.index%setSize)] = disk.path
format, err := getFormatJSON(disk.path)
if err != nil {
log.Printf("Unable to read format.json from `%s`, error: %v\n", disk.path, err)
continue
}
foundDiskLoc, err := getDiskLocation(format)
if err != nil {
log.Printf("Unable to get disk location of `%s`, error: %v\n", disk.path, err)
continue
}
actualDisksName[foundDiskLoc] = disk.path
}
uuidMap, err := getDiskUUIDMap()
if err != nil {
log.Fatal("Unable to analyze UUID in /dev/disk/by-uuid/:", err)
}
mountMap, err := getMountMap()
if err != nil {
log.Fatal("Unable to parse /proc/self/mountinfo:", err)
}
for loc, expectedDiskName := range expectedDisksName {
diskName := actualDisksName[loc]
if diskName == "" {
log.Printf("skipping disk location `%s`, err: %v\n", diskName, err)
continue
}
mami, err := getMajorMinor(diskName)
if err != nil {
log.Printf("skipping `%s`, err: %v\n", diskName, err)
continue
}
devName := mountMap[mami]
uuid := uuidMap[devName]
fmt.Printf("UUID=%s\t%s\txfs\tdefaults,noatime\t0\t2\n", uuid, expectedDiskName)
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"runtime"
"strconv"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
// healTask represents what to heal along with options
//
// path: '/' => Heal disk formats along with metadata
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
bucket string
object string
versionID string
opts madmin.HealOpts
// Healing response will be sent here
respCh chan healResult
}
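// As an illustrative sketch (channel plumbing elided), a request to heal only
// the bucket "photos" would be queued as:
//
//	h.tasks <- healTask{bucket: "photos", opts: madmin.HealOpts{}, respCh: respCh}
//
// and an object heal additionally sets object (and optionally versionID).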
// healResult represents a healing result with a possible error
type healResult struct {
result madmin.HealResultItem
err error
}
// healRoutine receives heal tasks to heal buckets, objects and format.json
type healRoutine struct {
tasks chan healTask
workers int
}
func activeListeners() int {
// Bucket notification and HTTP trace subscribers are not costly; it is okay to
// ignore them while counting the number of concurrent connections
return int(globalHTTPListen.Subscribers()) + int(globalTrace.Subscribers())
}
func waitForLowIO(maxIO int, maxWait time.Duration, currentIO func() int) {
// No need to wait, run at full speed.
if maxIO <= 0 {
return
}
const waitTick = 100 * time.Millisecond
tmpMaxWait := maxWait
for currentIO() >= maxIO {
if tmpMaxWait > 0 {
if tmpMaxWait < waitTick {
time.Sleep(tmpMaxWait)
} else {
time.Sleep(waitTick)
}
tmpMaxWait -= waitTick
}
if tmpMaxWait <= 0 {
return
}
}
}
func currentHTTPIO() int {
httpServer := newHTTPServerFn()
if httpServer == nil {
return 0
}
return httpServer.GetRequestCount() - activeListeners()
}
func waitForLowHTTPReq() {
maxIO, maxWait, _ := globalHealConfig.Clone()
waitForLowIO(maxIO, maxWait, currentHTTPIO)
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
}
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
}
// Wait for heal requests and process them
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
if !ok {
return
}
var res madmin.HealResultItem
var err error
switch task.bucket {
case nopHeal:
err = errSkipFile
case SlashSeparator:
res, err = healDiskFormat(ctx, objAPI, task.opts)
default:
if task.object == "" {
res, err = objAPI.HealBucket(ctx, task.bucket, task.opts)
} else {
res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
}
}
if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
}
case <-ctx.Done():
return
}
}
}
func newHealRoutine() *healRoutine {
workers := runtime.GOMAXPROCS(0) / 2
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}
}
if workers == 0 {
workers = 4
}
return &healRoutine{
tasks: make(chan healTask),
workers: workers,
}
}
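// For example, starting the server with the internal override below forces
// eight heal workers regardless of GOMAXPROCS (illustrative command, drive
// paths are placeholders):
//
//	_MINIO_HEAL_WORKERS=8 minio server /mnt/drive{1...4}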
// healDiskFormat - heals format.json; the returned error indicates whether a
// healing failure occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)
// return any error, ignore error returned when disks have
// already healed.
if err != nil && err != errNoHealRequired {
return madmin.HealResultItem{}, err
}
return res, nil
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/disk"
)
type collectMetricsOpts struct {
hosts map[string]struct{}
disks map[string]struct{}
jobID string
depID string
}
func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if types == madmin.MetricsNone {
return
}
if len(opts.hosts) > 0 {
if _, ok := opts.hosts[globalMinioAddr]; !ok {
return
}
}
if types.Contains(madmin.MetricsDisk) {
m.ByDisk = make(map[string]madmin.DiskMetric)
aggr := madmin.DiskMetric{
CollectedAt: time.Now(),
}
for name, disk := range collectLocalDisksMetrics(opts.disks) {
m.ByDisk[name] = disk
aggr.Merge(&disk)
}
m.Aggregated.Disk = &aggr
}
if types.Contains(madmin.MetricsScanner) {
metrics := globalScannerMetrics.report()
m.Aggregated.Scanner = &metrics
}
if types.Contains(madmin.MetricsOS) {
metrics := globalOSMetrics.report()
m.Aggregated.OS = &metrics
}
if types.Contains(madmin.MetricsBatchJobs) {
m.Aggregated.BatchJobs = globalBatchJobsMetrics.report(opts.jobID)
}
if types.Contains(madmin.MetricsSiteResync) {
m.Aggregated.SiteResync = globalSiteResyncMetrics.report(opts.depID)
}
// Add types...
// ByHost is a shallow reference, so be careful about sharing.
m.ByHost = map[string]madmin.Metrics{globalMinioAddr: m.Aggregated}
m.Hosts = append(m.Hosts, globalMinioAddr)
return m
}
func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskMetric {
objLayer := newObjectLayerFn()
if objLayer == nil {
return nil
}
metrics := make(map[string]madmin.DiskMetric)
procStats, procErr := disk.GetAllDrivesIOStats()
if procErr != nil {
return metrics
}
storageInfo := objLayer.LocalStorageInfo(GlobalContext)
for _, d := range storageInfo.Disks {
if len(disks) != 0 {
_, ok := disks[d.Endpoint]
if !ok {
continue
}
}
if d.State != madmin.DriveStateOk && d.State != madmin.DriveStateUnformatted {
metrics[d.Endpoint] = madmin.DiskMetric{NDisks: 1, Offline: 1}
continue
}
var dm madmin.DiskMetric
dm.NDisks = 1
if d.Healing {
dm.Healing++
}
if d.Metrics != nil {
dm.LifeTimeOps = make(map[string]uint64, len(d.Metrics.APICalls))
for k, v := range d.Metrics.APICalls {
if v != 0 {
dm.LifeTimeOps[k] = v
}
}
dm.LastMinute.Operations = make(map[string]madmin.TimedAction, len(d.Metrics.APICalls))
for k, v := range d.Metrics.LastMinute {
if v.Count != 0 {
dm.LastMinute.Operations[k] = v
}
}
}
// attach per-drive IO stats collected above
if procErr == nil {
st := procStats[disk.DevID{Major: d.Major, Minor: d.Minor}]
dm.IOStats = madmin.DiskIOStats{
ReadIOs: st.ReadIOs,
ReadMerges: st.ReadMerges,
ReadSectors: st.ReadSectors,
ReadTicks: st.ReadTicks,
WriteIOs: st.WriteIOs,
WriteMerges: st.WriteMerges,
WriteSectors: st.WriteSectors,
WriteTicks: st.WriteTicks,
CurrentIOs: st.CurrentIOs,
TotalTicks: st.TotalTicks,
ReqTicks: st.ReqTicks,
DiscardIOs: st.DiscardIOs,
DiscardMerges: st.DiscardMerges,
DiscardSectors: st.DiscardSectors,
DiscardTicks: st.DiscardTicks,
FlushIOs: st.FlushIOs,
FlushTicks: st.FlushTicks,
}
}
metrics[d.Endpoint] = dm
}
return metrics
}
func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if !globalIsDistErasure {
return
}
all := globalNotificationSys.GetMetrics(ctx, types, opts)
for _, remote := range all {
m.Merge(&remote)
}
return m
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"testing"
)
func TestARNString(t *testing.T) {
testCases := []struct {
arn ARN
expectedResult string
}{
{ARN{}, ""},
{ARN{TargetID{"1", "webhook"}, ""}, "arn:minio:s3-object-lambda::1:webhook"},
{ARN{TargetID{"1", "webhook"}, "us-east-1"}, "arn:minio:s3-object-lambda:us-east-1:1:webhook"},
}
for i, testCase := range testCases {
result := testCase.arn.String()
if result != testCase.expectedResult {
t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
}
}
}
func TestParseARN(t *testing.T) {
testCases := []struct {
s string
expectedARN *ARN
expectErr bool
}{
{"", nil, true},
{"arn:minio:s3-object-lambda:::", nil, true},
{"arn:minio:s3-object-lambda::1:webhook:remote", nil, true},
{"arn:aws:s3-object-lambda::1:webhook", nil, true},
{"arn:minio:sns::1:webhook", nil, true},
{"arn:minio:s3-object-lambda::1:webhook", &ARN{TargetID{"1", "webhook"}, ""}, false},
{"arn:minio:s3-object-lambda:us-east-1:1:webhook", &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false},
}
for i, testCase := range testCases {
arn, err := ParseARN(testCase.s)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
}
if !testCase.expectErr {
if *arn != *testCase.expectedARN {
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn)
}
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"strings"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/logger"
)
// BucketVersioningSys - bucket versioning subsystem.
type BucketVersioningSys struct{}
// Enabled returns whether versioning is enabled for the given bucket.
func (sys *BucketVersioningSys) Enabled(bucket string) bool {
vc, err := sys.Get(bucket)
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
return vc.Enabled()
}
// PrefixEnabled returns true if versioning is enabled at the bucket level and
// the given prefix doesn't match any excluded prefix pattern. This is
// part of a MinIO versioning configuration extension.
func (sys *BucketVersioningSys) PrefixEnabled(bucket, prefix string) bool {
vc, err := sys.Get(bucket)
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
return vc.PrefixEnabled(prefix)
}
// Suspended returns whether versioning is suspended for the given bucket.
func (sys *BucketVersioningSys) Suspended(bucket string) bool {
vc, err := sys.Get(bucket)
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
return vc.Suspended()
}
// PrefixSuspended returns true if the given prefix matches an excluded prefix
// pattern. This is part of a MinIO versioning configuration extension.
func (sys *BucketVersioningSys) PrefixSuspended(bucket, prefix string) bool {
vc, err := sys.Get(bucket)
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
return vc.PrefixSuspended(prefix)
}
// Get returns the stored bucket versioning configuration.
func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) {
if bucket == minioMetaBucket || strings.HasPrefix(bucket, minioMetaBucket) {
return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, nil
}
vcfg, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
return vcfg, err
}
// NewBucketVersioningSys - creates new versioning system.
func NewBucketVersioningSys() *BucketVersioningSys {
return &BucketVersioningSys{}
}
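// As an illustration of the versioning extension mentioned above, a bucket
// configuration that versions everything except a scratch prefix could look
// roughly like the following sketch (the authoritative schema lives in
// internal/bucket/versioning; element names here are assumptions):
//
//	<VersioningConfiguration>
//	  <Status>Enabled</Status>
//	  <ExcludedPrefixes>
//	    <Prefix>scratch/</Prefix>
//	  </ExcludedPrefixes>
//	</VersioningConfiguration>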
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"net/url"
"reflect"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/minio/madmin-go/v3"
minioClient "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/replication"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
sreplication "github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/logger"
bktpolicy "github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
)
const (
srStatePrefix = minioConfigPrefix + "/site-replication"
srStateFile = "state.json"
)
const (
srStateFormatVersion1 = 1
)
var (
errSRCannotJoin = SRError{
Cause: errors.New("this site is already configured for site-replication"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRDuplicateSites = SRError{
Cause: errors.New("duplicate sites provided for site-replication"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRSelfNotFound = SRError{
Cause: errors.New("none of the given sites correspond to the current one"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRPeerNotFound = SRError{
Cause: errors.New("peer not found"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRRequestorNotFound = SRError{
Cause: errors.New("requesting site not found in site replication config"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRNotEnabled = SRError{
Cause: errors.New("site replication is not enabled"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRResyncStarted = SRError{
Cause: errors.New("site replication resync is already in progress"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRResyncCanceled = SRError{
Cause: errors.New("site replication resync is already canceled"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRNoResync = SRError{
Cause: errors.New("no resync in progress"),
Code: ErrSiteReplicationInvalidRequest,
}
errSRResyncToSelf = SRError{
Cause: errors.New("invalid peer specified - cannot resync to self"),
Code: ErrSiteReplicationInvalidRequest,
}
)
func errSRInvalidRequest(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
}
}
func errSRPeerResp(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationPeerResp,
}
}
func errSRBackendIssue(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationBackendIssue,
}
}
func errSRServiceAccount(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationServiceAccountError,
}
}
func errSRBucketConfigError(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationBucketConfigError,
}
}
func errSRBucketMetaError(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationBucketMetaError,
}
}
func errSRIAMError(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationIAMError,
}
}
func errSRConfigMissingError(err error) SRError {
return SRError{
Cause: err,
Code: ErrSiteReplicationConfigMissing,
}
}
var errSRObjectLayerNotReady = SRError{
Cause: fmt.Errorf("object layer not ready"),
Code: ErrServerNotInitialized,
}
func getSRStateFilePath() string {
return srStatePrefix + SlashSeparator + srStateFile
}
// SRError - wrapped error for site replication.
type SRError struct {
Cause error
Code APIErrorCode
}
func (c SRError) Error() string {
if c.Cause != nil {
return c.Cause.Error()
}
return "<nil>"
}
func (c SRError) Unwrap() error {
return c.Cause
}
func wrapSRErr(err error) SRError {
return SRError{Cause: err, Code: ErrInternalError}
}
// SiteReplicationSys - manages cluster-level replication.
type SiteReplicationSys struct {
sync.RWMutex
enabled bool
// In-memory and persisted multi-site replication state.
state srState
iamMetaCache srIAMCache
}
type srState srStateV1
// srStateV1 represents version 1 of the site replication state persistence
// format.
type srStateV1 struct {
Name string `json:"name"`
// Peers maps peers by their deploymentID
Peers map[string]madmin.PeerInfo `json:"peers"`
ServiceAccountAccessKey string `json:"serviceAccountAccessKey"`
}
// srStateData represents the format of the current `srStateFile`.
type srStateData struct {
Version int `json:"version"`
SRState srStateV1 `json:"srState"`
}
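// A persisted state file therefore looks roughly as follows (illustrative
// values, peer entries abbreviated):
//
//	{
//	  "version": 1,
//	  "srState": {
//	    "name": "site-a",
//	    "peers": { "<deployment-id>": { ... } },
//	    "serviceAccountAccessKey": "site-replicator-0"
//	  }
//	}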
// Init - initialize the site replication manager.
func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error {
go c.startHealRoutine(ctx, objAPI)
err := c.loadFromDisk(ctx, objAPI)
if err == errConfigNotFound {
return nil
}
c.RLock()
defer c.RUnlock()
if c.enabled {
logger.Info("Cluster replication initialized")
}
return err
}
func (c *SiteReplicationSys) loadFromDisk(ctx context.Context, objAPI ObjectLayer) error {
buf, err := readConfig(ctx, objAPI, getSRStateFilePath())
if err != nil {
if errors.Is(err, errConfigNotFound) {
c.Lock()
defer c.Unlock()
c.state = srState{}
c.enabled = false
}
return err
}
// attempt to read just the version key in the state file to ensure we
// are reading a compatible version.
var ver struct {
Version int `json:"version"`
}
err = json.Unmarshal(buf, &ver)
if err != nil {
return err
}
if ver.Version != srStateFormatVersion1 {
return fmt.Errorf("Unexpected ClusterRepl state version: %d", ver.Version)
}
var sdata srStateData
err = json.Unmarshal(buf, &sdata)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.state = srState(sdata.SRState)
c.enabled = len(c.state.Peers) != 0
return nil
}
func (c *SiteReplicationSys) saveToDisk(ctx context.Context, state srState) error {
sdata := srStateData{
Version: srStateFormatVersion1,
SRState: srStateV1(state),
}
buf, err := json.Marshal(sdata)
if err != nil {
return err
}
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
if err = saveConfig(ctx, objAPI, getSRStateFilePath(), buf); err != nil {
return err
}
for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) {
logger.LogIf(ctx, err)
}
c.Lock()
defer c.Unlock()
c.state = state
c.enabled = len(c.state.Peers) != 0
return nil
}
func (c *SiteReplicationSys) removeFromDisk(ctx context.Context) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
if err := deleteConfig(ctx, objAPI, getSRStateFilePath()); err != nil {
return err
}
for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) {
logger.LogIf(ctx, err)
}
c.Lock()
defer c.Unlock()
c.state = srState{}
c.enabled = false
return nil
}
const (
// Access key of service account used to perform cluster-replication
// operations.
siteReplicatorSvcAcc = "site-replicator-0"
)
// PeerSiteInfo is a wrapper struct around madmin.PeerSite with extra info on site status
type PeerSiteInfo struct {
madmin.PeerSite
self bool
DeploymentID string
Replicated bool // true if already participating in site replication
Empty bool // true if cluster has no buckets
}
// getSiteStatuses gathers more info on the sites being added
func (c *SiteReplicationSys) getSiteStatuses(ctx context.Context, sites ...madmin.PeerSite) (psi []PeerSiteInfo, err error) {
psi = make([]PeerSiteInfo, 0, len(sites))
for _, v := range sites {
admClient, err := getAdminClient(v.Endpoint, v.AccessKey, v.SecretKey)
if err != nil {
return psi, errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
}
info, err := admClient.ServerInfo(ctx)
if err != nil {
return psi, errSRPeerResp(fmt.Errorf("unable to fetch server info for %s: %w", v.Name, err))
}
s3Client, err := getS3Client(v)
if err != nil {
return psi, errSRPeerResp(fmt.Errorf("unable to create s3 client for %s: %w", v.Name, err))
}
buckets, err := s3Client.ListBuckets(ctx)
if err != nil {
return psi, errSRPeerResp(fmt.Errorf("unable to list buckets for %s: %v", v.Name, err))
}
psi = append(psi, PeerSiteInfo{
PeerSite: v,
DeploymentID: info.DeploymentID,
Empty: len(buckets) == 0,
self: info.DeploymentID == globalDeploymentID,
})
}
return
}
// AddPeerClusters - add cluster sites for replication configuration.
func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmin.PeerSite) (madmin.ReplicateAddStatus, error) {
sites, serr := c.getSiteStatuses(ctx, psites...)
if serr != nil {
return madmin.ReplicateAddStatus{}, serr
}
var (
currSites madmin.SiteReplicationInfo
currDeploymentIDsSet = set.NewStringSet()
err error
)
currSites, err = c.GetClusterInfo(ctx)
if err != nil {
return madmin.ReplicateAddStatus{}, errSRBackendIssue(err)
}
for _, v := range currSites.Sites {
currDeploymentIDsSet.Add(v.DeploymentID)
}
deploymentIDsSet := set.NewStringSet()
localHasBuckets := false
nonLocalPeerWithBuckets := ""
selfIdx := -1
for i, v := range sites {
// deploymentIDs must be unique
if deploymentIDsSet.Contains(v.DeploymentID) {
return madmin.ReplicateAddStatus{}, errSRDuplicateSites
}
deploymentIDsSet.Add(v.DeploymentID)
if v.self {
selfIdx = i
localHasBuckets = !v.Empty
continue
}
if !v.Empty && !currDeploymentIDsSet.Contains(v.DeploymentID) {
nonLocalPeerWithBuckets = v.Name
}
}
if selfIdx == -1 {
return madmin.ReplicateAddStatus{}, errSRBackendIssue(fmt.Errorf("global deployment ID %s mismatch, expected one of %s", globalDeploymentID, deploymentIDsSet))
}
if !currDeploymentIDsSet.IsEmpty() {
// If the current cluster is already SR enabled and no new site is being added, fail.
if currDeploymentIDsSet.Equals(deploymentIDsSet) {
return madmin.ReplicateAddStatus{}, errSRCannotJoin
}
if len(currDeploymentIDsSet.Intersection(deploymentIDsSet)) != len(currDeploymentIDsSet) {
diffSlc := getMissingSiteNames(currDeploymentIDsSet, deploymentIDsSet, currSites.Sites)
return madmin.ReplicateAddStatus{}, errSRInvalidRequest(fmt.Errorf("all existing replicated sites must be specified - missing %s", strings.Join(diffSlc, " ")))
}
}
// validate that all clusters are using the same IDP settings.
pass, err := c.validateIDPSettings(ctx, sites)
if err != nil {
return madmin.ReplicateAddStatus{}, err
}
if !pass {
return madmin.ReplicateAddStatus{}, errSRInvalidRequest(errors.New("all cluster sites must have the same IAM/IDP settings"))
}
// For this `add` API, either all clusters must be empty or the local
// cluster must be the only one having some buckets.
if localHasBuckets && nonLocalPeerWithBuckets != "" {
return madmin.ReplicateAddStatus{}, errSRInvalidRequest(errors.New("only one cluster may have data when configuring site replication"))
}
if !localHasBuckets && nonLocalPeerWithBuckets != "" {
return madmin.ReplicateAddStatus{}, errSRInvalidRequest(fmt.Errorf("please send your request to the cluster containing data/buckets: %s", nonLocalPeerWithBuckets))
}
// FIXME: Ideally, we also need to check if there are any global IAM
// policies and any (LDAP user created) service accounts on the other
// peer clusters, and if so, reject the cluster replicate add request.
// This is not yet implemented.
// VALIDATIONS COMPLETE.
// Create a common service account for all clusters, with root
// permissions.
// Create a local service account.
// Generate a secret key for the service account if not created already.
var secretKey string
var svcCred auth.Credentials
sa, _, err := globalIAMSys.getServiceAccount(ctx, siteReplicatorSvcAcc)
switch {
case err == errNoSuchServiceAccount:
_, secretKey, err = auth.GenerateCredentials()
if err != nil {
return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err))
}
svcCred, _, err = globalIAMSys.NewServiceAccount(ctx, sites[selfIdx].AccessKey, nil, newServiceAccountOpts{
accessKey: siteReplicatorSvcAcc,
secretKey: secretKey,
allowSiteReplicatorAccount: true,
})
if err != nil {
return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err))
}
case err == nil:
svcCred = sa.Credentials
secretKey = svcCred.SecretKey
default:
return madmin.ReplicateAddStatus{}, errSRBackendIssue(err)
}
joinReq := madmin.SRPeerJoinReq{
SvcAcctAccessKey: svcCred.AccessKey,
SvcAcctSecretKey: secretKey,
Peers: make(map[string]madmin.PeerInfo),
}
for _, v := range sites {
joinReq.Peers[v.DeploymentID] = madmin.PeerInfo{
Endpoint: v.Endpoint,
Name: v.Name,
DeploymentID: v.DeploymentID,
}
}
addedCount := 0
var (
peerAddErr error
admClient *madmin.AdminClient
)
for _, v := range sites {
if v.self {
continue
}
switch {
case currDeploymentIDsSet.Contains(v.DeploymentID):
admClient, err = c.getAdminClient(ctx, v.DeploymentID)
default:
admClient, err = getAdminClient(v.Endpoint, v.AccessKey, v.SecretKey)
}
if err != nil {
peerAddErr = errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
break
}
joinReq.SvcAcctParent = v.AccessKey
err = admClient.SRPeerJoin(ctx, joinReq)
if err != nil {
peerAddErr = errSRPeerResp(fmt.Errorf("unable to link with peer %s: %w", v.Name, err))
break
}
addedCount++
}
if peerAddErr != nil {
if addedCount == 0 {
return madmin.ReplicateAddStatus{}, peerAddErr
}
// In this case, it means at least one cluster was added
// successfully; we need to send a response to the client with
// some details - FIXME: the disks on this cluster would need to
// be cleaned to recover.
partial := madmin.ReplicateAddStatus{
Status: madmin.ReplicateAddStatusPartial,
ErrDetail: peerAddErr.Error(),
}
return partial, nil
}
// Other than handling existing buckets, we can now save the cluster
// replication configuration state.
state := srState{
Name: sites[selfIdx].Name,
Peers: joinReq.Peers,
ServiceAccountAccessKey: svcCred.AccessKey,
}
if err = c.saveToDisk(ctx, state); err != nil {
return madmin.ReplicateAddStatus{
Status: madmin.ReplicateAddStatusPartial,
ErrDetail: fmt.Sprintf("unable to save cluster-replication state on local: %v", err),
}, nil
}
result := madmin.ReplicateAddStatus{
Success: true,
Status: madmin.ReplicateAddStatusSuccess,
}
if err := c.syncToAllPeers(ctx); err != nil {
result.InitialSyncErrorMessage = err.Error()
}
return result, nil
}
// PeerJoinReq - internal API handler to respond to a peer cluster's request to join.
func (c *SiteReplicationSys) PeerJoinReq(ctx context.Context, arg madmin.SRPeerJoinReq) error {
var ourName string
for d, p := range arg.Peers {
if d == globalDeploymentID {
ourName = p.Name
break
}
}
if ourName == "" {
return errSRSelfNotFound
}
_, _, err := globalIAMSys.GetServiceAccount(ctx, arg.SvcAcctAccessKey)
if err == errNoSuchServiceAccount {
_, _, err = globalIAMSys.NewServiceAccount(ctx, arg.SvcAcctParent, nil, newServiceAccountOpts{
accessKey: arg.SvcAcctAccessKey,
secretKey: arg.SvcAcctSecretKey,
allowSiteReplicatorAccount: arg.SvcAcctAccessKey == siteReplicatorSvcAcc,
})
}
if err != nil {
return errSRServiceAccount(fmt.Errorf("unable to create service account on %s: %v", ourName, err))
}
state := srState{
Name: ourName,
Peers: arg.Peers,
ServiceAccountAccessKey: arg.SvcAcctAccessKey,
}
if err = c.saveToDisk(ctx, state); err != nil {
return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
}
return nil
}
// GetIDPSettings returns info about the configured identity provider. It is
// used to validate that all peers have the same IDP.
func (c *SiteReplicationSys) GetIDPSettings(ctx context.Context) madmin.IDPSettings {
s := madmin.IDPSettings{}
s.LDAP = madmin.LDAPSettings{
IsLDAPEnabled: globalIAMSys.LDAPConfig.Enabled(),
LDAPUserDNSearchBase: globalIAMSys.LDAPConfig.LDAP.UserDNSearchBaseDistName,
LDAPUserDNSearchFilter: globalIAMSys.LDAPConfig.LDAP.UserDNSearchFilter,
LDAPGroupSearchBase: globalIAMSys.LDAPConfig.LDAP.GroupSearchBaseDistName,
LDAPGroupSearchFilter: globalIAMSys.LDAPConfig.LDAP.GroupSearchFilter,
}
s.OpenID = globalIAMSys.OpenIDConfig.GetSettings()
if s.OpenID.Enabled {
s.OpenID.Region = globalSite.Region
}
return s
}
func (c *SiteReplicationSys) validateIDPSettings(ctx context.Context, peers []PeerSiteInfo) (bool, error) {
s := make([]madmin.IDPSettings, 0, len(peers))
for _, v := range peers {
if v.self {
s = append(s, c.GetIDPSettings(ctx))
continue
}
admClient, err := getAdminClient(v.Endpoint, v.AccessKey, v.SecretKey)
if err != nil {
return false, errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
}
is, err := admClient.SRPeerGetIDPSettings(ctx)
if err != nil {
return false, errSRPeerResp(fmt.Errorf("unable to fetch IDP settings from %s: %v", v.Name, err))
}
s = append(s, is)
}
for i := 1; i < len(s); i++ {
if !reflect.DeepEqual(s[i], s[0]) {
return false, nil
}
}
return true, nil
}
// GetClusterInfo - returns site replication information.
func (c *SiteReplicationSys) GetClusterInfo(ctx context.Context) (info madmin.SiteReplicationInfo, err error) {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return info, nil
}
info.Enabled = true
info.Name = c.state.Name
info.Sites = make([]madmin.PeerInfo, 0, len(c.state.Peers))
for _, peer := range c.state.Peers {
info.Sites = append(info.Sites, peer)
}
sort.Slice(info.Sites, func(i, j int) bool {
return info.Sites[i].Name < info.Sites[j].Name
})
info.ServiceAccountAccessKey = c.state.ServiceAccountAccessKey
return info, nil
}
const (
makeBucketWithVersion = "MakeBucketWithVersioning"
configureReplication = "ConfigureReplication"
deleteBucket = "DeleteBucket"
replicateIAMItem = "SRPeerReplicateIAMItem"
replicateBucketMetadata = "SRPeerReplicateBucketMeta"
)
// MakeBucketHook - called during a regular make bucket call when cluster
// replication is enabled. It is responsible for the creation of the same bucket
// on remote clusters, and creating replication rules on local and peer
// clusters.
func (c *SiteReplicationSys) MakeBucketHook(ctx context.Context, bucket string, opts MakeBucketOptions) error {
// At this point, the local bucket is created.
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
optsMap := make(map[string]string)
if opts.LockEnabled {
optsMap["lockEnabled"] = "true"
optsMap["versioningEnabled"] = "true"
}
if opts.VersioningEnabled {
optsMap["versioningEnabled"] = "true"
}
if opts.ForceCreate {
optsMap["forceCreate"] = "true"
}
createdAt, _ := globalBucketMetadataSys.CreatedAt(bucket)
optsMap["createdAt"] = createdAt.UTC().Format(time.RFC3339Nano)
opts.CreatedAt = createdAt
// Create bucket and enable versioning on all peers.
makeBucketConcErr := c.concDo(
func() error {
return c.annotateErr(makeBucketWithVersion, c.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts))
},
func(deploymentID string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, deploymentID)
if err != nil {
return err
}
return c.annotatePeerErr(p.Name, makeBucketWithVersion, admClient.SRPeerBucketOps(ctx, bucket, madmin.MakeWithVersioningBktOp, optsMap))
},
makeBucketWithVersion,
)
// Create bucket remotes and add replication rules for the bucket on self and peers.
makeRemotesConcErr := c.concDo(
func() error {
return c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket))
},
func(deploymentID string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, deploymentID)
if err != nil {
return err
}
return c.annotatePeerErr(p.Name, configureReplication, admClient.SRPeerBucketOps(ctx, bucket, madmin.ConfigureReplBktOp, nil))
},
configureReplication,
)
if err := errors.Unwrap(makeBucketConcErr); err != nil {
return err
}
if err := errors.Unwrap(makeRemotesConcErr); err != nil {
return err
}
return nil
}
// DeleteBucketHook - called during a regular delete bucket call when cluster
// replication is enabled. It is responsible for the deletion of the same bucket
// on remote clusters.
func (c *SiteReplicationSys) DeleteBucketHook(ctx context.Context, bucket string, forceDelete bool) error {
// At this point, the local bucket is deleted.
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
op := madmin.DeleteBucketBktOp
if forceDelete {
op = madmin.ForceDeleteBucketBktOp
}
// Send bucket delete to other clusters.
cerr := c.concDo(nil, func(deploymentID string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, deploymentID)
if err != nil {
return wrapSRErr(err)
}
return c.annotatePeerErr(p.Name, deleteBucket, admClient.SRPeerBucketOps(ctx, bucket, op, nil))
},
deleteBucket,
)
return errors.Unwrap(cerr)
}
// PeerBucketMakeWithVersioningHandler - creates bucket and enables versioning.
func (c *SiteReplicationSys) PeerBucketMakeWithVersioningHandler(ctx context.Context, bucket string, opts MakeBucketOptions) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
err := objAPI.MakeBucket(ctx, bucket, opts)
if err != nil {
// Check if this is a bucket exists error.
_, ok1 := err.(BucketExists)
_, ok2 := err.(BucketAlreadyExists)
if !ok1 && !ok2 {
return wrapSRErr(c.annotateErr(makeBucketWithVersion, err))
}
} else {
// Load updated bucket metadata into memory as new
// bucket was created.
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
}
meta, err := globalBucketMetadataSys.Get(bucket)
if err != nil {
return wrapSRErr(c.annotateErr(makeBucketWithVersion, err))
}
meta.SetCreatedAt(opts.CreatedAt)
meta.VersioningConfigXML = enabledBucketVersioningConfig
if opts.LockEnabled {
meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
}
if err := meta.Save(context.Background(), objAPI); err != nil {
return wrapSRErr(err)
}
globalBucketMetadataSys.Set(bucket, meta)
// Load updated bucket metadata into memory as new metadata updated.
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
return nil
}
// PeerBucketConfigureReplHandler - configures replication remote and
// replication rules to all other peers for the local bucket.
func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context, bucket string) error {
creds, err := c.getPeerCreds()
if err != nil {
return wrapSRErr(err)
}
// The following function creates a bucket remote and sets up a bucket
// replication rule for the given peer.
configurePeerFn := func(d string, peer madmin.PeerInfo) error {
// Create bucket replication rule to this peer.
// To add the bucket replication rule, we fetch the current
// server configuration, and convert it to minio-go's
// replication configuration type (by converting to xml and
// parsing it back), use minio-go's add rule function, and
// finally convert it back to the server type (again via xml).
// This is needed as there is no add-rule function in the server
// yet.
// Though we do not check if the rule already exists, this is
// not a problem as we are always using the same replication
// rule ID - if the rule already exists, it is just replaced.
replicationConfigS, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
_, ok := err.(BucketReplicationConfigNotFound)
if !ok {
return err
}
}
var replicationConfig replication.Config
if replicationConfigS != nil {
replCfgSBytes, err := xml.Marshal(replicationConfigS)
if err != nil {
return err
}
err = xml.Unmarshal(replCfgSBytes, &replicationConfig)
if err != nil {
return err
}
}
var (
ruleID = fmt.Sprintf("site-repl-%s", d)
hasRule bool
)
var ruleARN string
for _, r := range replicationConfig.Rules {
if r.ID == ruleID {
hasRule = true
ruleARN = r.Destination.Bucket
}
}
ep, _ := url.Parse(peer.Endpoint)
var targets []madmin.BucketTarget
if targetsPtr, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket); targetsPtr != nil {
targets = targetsPtr.Targets
}
targetARN := ""
var updateTgt bool
var targetToUpdate madmin.BucketTarget
for _, target := range targets {
if target.Arn == ruleARN {
targetARN = ruleARN
if target.URL().String() != peer.Endpoint {
updateTgt = true
targetToUpdate = target
}
break
}
}
// replication config had a stale target ARN - update the endpoint
if updateTgt {
targetToUpdate.Endpoint = ep.Host
targetToUpdate.Secure = ep.Scheme == "https"
targetToUpdate.Credentials = &madmin.Credentials{
AccessKey: creds.AccessKey,
SecretKey: creds.SecretKey,
}
if !peer.SyncState.Empty() {
targetToUpdate.ReplicationSync = (peer.SyncState == madmin.SyncEnabled)
}
err := globalBucketTargetSys.SetTarget(ctx, bucket, &targetToUpdate, true)
if err != nil {
return c.annotatePeerErr(peer.Name, "Bucket target update error", err)
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
return wrapSRErr(err)
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
return wrapSRErr(err)
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
return wrapSRErr(err)
}
}
// no replication rule for this peer or target ARN missing in bucket targets
if targetARN == "" {
bucketTarget := madmin.BucketTarget{
SourceBucket: bucket,
Endpoint: ep.Host,
Credentials: &madmin.Credentials{
AccessKey: creds.AccessKey,
SecretKey: creds.SecretKey,
},
TargetBucket: bucket,
Secure: ep.Scheme == "https",
API: "s3v4",
Type: madmin.ReplicationService,
Region: "",
ReplicationSync: peer.SyncState == madmin.SyncEnabled,
}
var exists bool // true if ARN already exists
bucketTarget.Arn, exists = globalBucketTargetSys.getRemoteARN(bucket, &bucketTarget, peer.DeploymentID)
if !exists { // persist newly generated ARN to targets and metadata on disk
err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, false)
if err != nil {
return c.annotatePeerErr(peer.Name, "Bucket target creation error", err)
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
return err
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
return err
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
return err
}
}
targetARN = bucketTarget.Arn
}
opts := replication.Options{
// Set the ID so we can identify the rule as being
// created for site-replication and include the
// destination cluster's deployment ID.
ID: ruleID,
// Use a helper to generate unique priority numbers.
Priority: fmt.Sprintf("%d", getPriorityHelper(replicationConfig)),
Op: replication.AddOption,
RuleStatus: "enable",
DestBucket: targetARN,
// Replicate everything!
ReplicateDeletes: "enable",
ReplicateDeleteMarkers: "enable",
ReplicaSync: "enable",
ExistingObjectReplicate: "enable",
}
switch {
case hasRule:
if ruleARN != opts.DestBucket {
// remove stale replication rule and replace rule with correct target ARN
if len(replicationConfig.Rules) > 1 {
err = replicationConfig.RemoveRule(opts)
} else {
replicationConfig = replication.Config{}
}
if err == nil {
err = replicationConfig.AddRule(opts)
}
} else {
err = replicationConfig.EditRule(opts)
}
default:
err = replicationConfig.AddRule(opts)
}
if err != nil {
return c.annotatePeerErr(peer.Name, "Error adding bucket replication rule", err)
}
// Now convert the configuration back to server's type so we can
// do some validation.
newReplCfgBytes, err := xml.Marshal(replicationConfig)
if err != nil {
return err
}
newReplicationConfig, err := sreplication.ParseConfig(bytes.NewReader(newReplCfgBytes))
if err != nil {
return err
}
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, newReplicationConfig, true)
if apiErr != noError {
return fmt.Errorf("bucket replication config validation error: %#v", apiErr)
}
err = newReplicationConfig.Validate(bucket, sameTarget)
if err != nil {
return err
}
// Config looks good, so we save it.
replCfgData, err := xml.Marshal(newReplicationConfig)
if err != nil {
return err
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, replCfgData)
return c.annotatePeerErr(peer.Name, "Error updating replication configuration", err)
}
c.RLock()
defer c.RUnlock()
errMap := make(map[string]error, len(c.state.Peers))
for d, peer := range c.state.Peers {
if d == globalDeploymentID {
continue
}
errMap[d] = configurePeerFn(d, peer)
}
return c.toErrorFromErrMap(errMap, configureReplication)
}
// PeerBucketDeleteHandler - deletes bucket on local in response to a delete
// bucket request from a peer.
func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return errSRNotEnabled
}
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
return err
}
}
err := objAPI.DeleteBucket(ctx, bucket, opts)
if err != nil {
if globalDNSConfig != nil {
if err2 := globalDNSConfig.Put(bucket); err2 != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2))
}
}
return err
}
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
return nil
}
// IAMChangeHook - called when IAM items need to be replicated to peer clusters.
// This includes named policy creation, policy mapping changes and service
// account changes.
//
// All policies are replicated.
//
// Policy mappings are only replicated when they are for LDAP users or groups
// (as an external IDP is always assumed when SR is used). In the case of
// OpenID, such mappings are provided from the IDP directly and so are not
// applicable here.
//
// Service accounts are replicated as long as they are not meant for the root
// user.
//
// STS accounts are replicated, but only if the session token is verifiable
// using the local cluster's root credential.
func (c *SiteReplicationSys) IAMChangeHook(ctx context.Context, item madmin.SRIAMItem) error {
// The IAM item has already been applied to the local cluster at this
// point, and only needs to be updated on all remote peer clusters.
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
cerr := c.concDo(nil, func(d string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, d)
if err != nil {
return wrapSRErr(err)
}
return c.annotatePeerErr(p.Name, replicateIAMItem, admClient.SRPeerReplicateIAMItem(ctx, item))
},
replicateIAMItem,
)
return errors.Unwrap(cerr)
}
// PeerAddPolicyHandler - copies IAM policy to local. A nil policy argument,
// causes the named policy to be deleted.
func (c *SiteReplicationSys) PeerAddPolicyHandler(ctx context.Context, policyName string, p *iampolicy.Policy, updatedAt time.Time) error {
var err error
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if p, err := globalIAMSys.store.GetPolicyDoc(policyName); err == nil && p.UpdateDate.After(updatedAt) {
return nil
}
}
if p == nil {
err = globalIAMSys.DeletePolicy(ctx, policyName, true)
} else {
_, err = globalIAMSys.SetPolicy(ctx, policyName, *p)
}
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerIAMUserChangeHandler - copies IAM user to local.
func (c *SiteReplicationSys) PeerIAMUserChangeHandler(ctx context.Context, change *madmin.SRIAMUser, updatedAt time.Time) error {
if change == nil {
return errSRInvalidRequest(errInvalidArgument)
}
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if ui, err := globalIAMSys.GetUserInfo(ctx, change.AccessKey); err == nil && ui.UpdatedAt.After(updatedAt) {
return nil
}
}
var err error
if change.IsDeleteReq {
err = globalIAMSys.DeleteUser(ctx, change.AccessKey, true)
} else {
if change.UserReq == nil {
return errSRInvalidRequest(errInvalidArgument)
}
userReq := *change.UserReq
if userReq.Status != "" && userReq.SecretKey == "" {
// Status being set without a secretKey update means we are
// only changing the account status.
_, err = globalIAMSys.SetUserStatus(ctx, change.AccessKey, userReq.Status)
} else {
_, err = globalIAMSys.CreateUser(ctx, change.AccessKey, userReq)
}
}
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerGroupInfoChangeHandler - copies group changes to local.
func (c *SiteReplicationSys) PeerGroupInfoChangeHandler(ctx context.Context, change *madmin.SRGroupInfo, updatedAt time.Time) error {
if change == nil {
return errSRInvalidRequest(errInvalidArgument)
}
updReq := change.UpdateReq
var err error
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if gd, err := globalIAMSys.GetGroupDescription(updReq.Group); err == nil && gd.UpdatedAt.After(updatedAt) {
return nil
}
}
if updReq.IsRemove {
_, err = globalIAMSys.RemoveUsersFromGroup(ctx, updReq.Group, updReq.Members)
} else {
if updReq.Status != "" && len(updReq.Members) == 0 {
_, err = globalIAMSys.SetGroupStatus(ctx, updReq.Group, updReq.Status == madmin.GroupEnabled)
} else {
_, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
if err == nil && updReq.Status != madmin.GroupEnabled {
_, err = globalIAMSys.SetGroupStatus(ctx, updReq.Group, updReq.Status == madmin.GroupEnabled)
}
}
}
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerSvcAccChangeHandler - copies service-account change to local.
func (c *SiteReplicationSys) PeerSvcAccChangeHandler(ctx context.Context, change *madmin.SRSvcAccChange, updatedAt time.Time) error {
if change == nil {
return errSRInvalidRequest(errInvalidArgument)
}
switch {
case change.Create != nil:
var sp *iampolicy.Policy
var err error
if len(change.Create.SessionPolicy) > 0 {
sp, err = iampolicy.ParseConfig(bytes.NewReader(change.Create.SessionPolicy))
if err != nil {
return wrapSRErr(err)
}
}
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() && change.Create.AccessKey != "" {
if sa, _, err := globalIAMSys.getServiceAccount(ctx, change.Create.AccessKey); err == nil && sa.UpdatedAt.After(updatedAt) {
return nil
}
}
opts := newServiceAccountOpts{
accessKey: change.Create.AccessKey,
secretKey: change.Create.SecretKey,
sessionPolicy: sp,
claims: change.Create.Claims,
name: change.Create.Name,
description: change.Create.Description,
expiration: change.Create.Expiration,
}
_, _, err = globalIAMSys.NewServiceAccount(ctx, change.Create.Parent, change.Create.Groups, opts)
if err != nil {
return wrapSRErr(err)
}
case change.Update != nil:
var sp *iampolicy.Policy
var err error
if len(change.Update.SessionPolicy) > 0 {
sp, err = iampolicy.ParseConfig(bytes.NewReader(change.Update.SessionPolicy))
if err != nil {
return wrapSRErr(err)
}
}
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if sa, _, err := globalIAMSys.getServiceAccount(ctx, change.Update.AccessKey); err == nil && sa.UpdatedAt.After(updatedAt) {
return nil
}
}
opts := updateServiceAccountOpts{
secretKey: change.Update.SecretKey,
status: change.Update.Status,
name: change.Update.Name,
description: change.Update.Description,
sessionPolicy: sp,
expiration: change.Update.Expiration,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, change.Update.AccessKey, opts)
if err != nil {
return wrapSRErr(err)
}
case change.Delete != nil:
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if sa, _, err := globalIAMSys.getServiceAccount(ctx, change.Delete.AccessKey); err == nil && sa.UpdatedAt.After(updatedAt) {
return nil
}
}
if err := globalIAMSys.DeleteServiceAccount(ctx, change.Delete.AccessKey, true); err != nil {
return wrapSRErr(err)
}
}
return nil
}
// PeerPolicyMappingHandler - copies policy mapping to local.
func (c *SiteReplicationSys) PeerPolicyMappingHandler(ctx context.Context, mapping *madmin.SRPolicyMapping, updatedAt time.Time) error {
if mapping == nil {
return errSRInvalidRequest(errInvalidArgument)
}
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
mp, ok := globalIAMSys.store.GetMappedPolicy(mapping.Policy, mapping.IsGroup)
if ok && mp.UpdatedAt.After(updatedAt) {
return nil
}
}
_, err := globalIAMSys.PolicyDBSet(ctx, mapping.UserOrGroup, mapping.Policy, IAMUserType(mapping.UserType), mapping.IsGroup)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerSTSAccHandler - replicates STS credential locally.
func (c *SiteReplicationSys) PeerSTSAccHandler(ctx context.Context, stsCred *madmin.SRSTSCredential, updatedAt time.Time) error {
if stsCred == nil {
return errSRInvalidRequest(errInvalidArgument)
}
// skip overwrite of local update if peer sent stale info
if !updatedAt.IsZero() {
if u, _, err := globalIAMSys.getTempAccount(ctx, stsCred.AccessKey); err == nil {
if u.UpdatedAt.After(updatedAt) {
return nil
}
}
}
// Verify the session token of the stsCred
claims, err := auth.ExtractClaims(stsCred.SessionToken, globalActiveCred.SecretKey)
if err != nil {
return fmt.Errorf("STS credential could not be verified: %w", err)
}
mapClaims := claims.Map()
expiry, err := auth.ExpToInt64(mapClaims["exp"])
if err != nil {
return fmt.Errorf("Expiry claim was not found: %v: %w", mapClaims, err)
}
cred := auth.Credentials{
AccessKey: stsCred.AccessKey,
SecretKey: stsCred.SecretKey,
Expiration: time.Unix(expiry, 0).UTC(),
SessionToken: stsCred.SessionToken,
ParentUser: stsCred.ParentUser,
Status: auth.AccountOn,
}
// Extract the username and look up its DN and groups in LDAP.
ldapUser, isLDAPSTS := claims.Lookup(ldapUserN)
if isLDAPSTS {
// Need to look up the groups from LDAP.
_, ldapGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(ldapUser)
if err != nil {
return fmt.Errorf("unable to query LDAP server for %s: %w", ldapUser, err)
}
cred.Groups = ldapGroups
}
// Set these credentials to IAM.
if _, err := globalIAMSys.SetTempUser(ctx, cred.AccessKey, cred, stsCred.ParentPolicyMapping); err != nil {
return fmt.Errorf("unable to save STS credential and/or parent policy mapping: %w", err)
}
return nil
}
// BucketMetaHook - called when bucket meta changes happen and need to be
// replicated to peer clusters.
func (c *SiteReplicationSys) BucketMetaHook(ctx context.Context, item madmin.SRBucketMeta) error {
// The change has already been applied to the local cluster at this
// point, and only needs to be updated on all remote peer clusters.
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
cerr := c.concDo(nil, func(d string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, d)
if err != nil {
return wrapSRErr(err)
}
return c.annotatePeerErr(p.Name, replicateBucketMetadata, admClient.SRPeerReplicateBucketMeta(ctx, item))
},
replicateBucketMetadata,
)
return errors.Unwrap(cerr)
}
// PeerBucketVersioningHandler - updates versioning config to local cluster.
func (c *SiteReplicationSys) PeerBucketVersioningHandler(ctx context.Context, bucket string, versioning *string, updatedAt time.Time) error {
if versioning != nil {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetVersioningConfig(bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
configData, err := base64.StdEncoding.DecodeString(*versioning)
if err != nil {
return wrapSRErr(err)
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData)
if err != nil {
return wrapSRErr(err)
}
return nil
}
return nil
}
// PeerBucketPolicyHandler - copies/deletes policy to local cluster.
func (c *SiteReplicationSys) PeerBucketPolicyHandler(ctx context.Context, bucket string, policy *bktpolicy.Policy, updatedAt time.Time) error {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetPolicyConfig(bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
if policy != nil {
configData, err := json.Marshal(policy)
if err != nil {
return wrapSRErr(err)
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// Delete the bucket policy
_, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketPolicyConfig)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerBucketTaggingHandler - copies/deletes tags to local cluster.
func (c *SiteReplicationSys) PeerBucketTaggingHandler(ctx context.Context, bucket string, tags *string, updatedAt time.Time) error {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetTaggingConfig(bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
if tags != nil {
configData, err := base64.StdEncoding.DecodeString(*tags)
if err != nil {
return wrapSRErr(err)
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// Delete the tags
_, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketTaggingConfig)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerBucketObjectLockConfigHandler - sets object lock on local bucket.
func (c *SiteReplicationSys) PeerBucketObjectLockConfigHandler(ctx context.Context, bucket string, objectLockData *string, updatedAt time.Time) error {
if objectLockData != nil {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetObjectLockConfig(bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
configData, err := base64.StdEncoding.DecodeString(*objectLockData)
if err != nil {
return wrapSRErr(err)
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
if err != nil {
return wrapSRErr(err)
}
return nil
}
return nil
}
// PeerBucketSSEConfigHandler - copies/deletes SSE config to local cluster.
func (c *SiteReplicationSys) PeerBucketSSEConfigHandler(ctx context.Context, bucket string, sseConfig *string, updatedAt time.Time) error {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetSSEConfig(bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
if sseConfig != nil {
configData, err := base64.StdEncoding.DecodeString(*sseConfig)
if err != nil {
return wrapSRErr(err)
}
_, err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// Delete SSE config
_, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketSSEConfig)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// PeerBucketQuotaConfigHandler - copies/deletes quota config to local cluster.
func (c *SiteReplicationSys) PeerBucketQuotaConfigHandler(ctx context.Context, bucket string, quota *madmin.BucketQuota, updatedAt time.Time) error {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket); err == nil && updateTm.After(updatedAt) {
return nil
}
}
if quota != nil {
quotaData, err := json.Marshal(quota)
if err != nil {
return wrapSRErr(err)
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, quotaData); err != nil {
return wrapSRErr(err)
}
return nil
}
// Delete the bucket quota config
_, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketQuotaConfigFile)
if err != nil {
return wrapSRErr(err)
}
return nil
}
// getAdminClient - NOTE: ensure that at least a read lock is held on
// SiteReplicationSys before calling this.
func (c *SiteReplicationSys) getAdminClient(ctx context.Context, deploymentID string) (*madmin.AdminClient, error) {
creds, err := c.getPeerCreds()
if err != nil {
return nil, err
}
peer, ok := c.state.Peers[deploymentID]
if !ok {
return nil, errSRPeerNotFound
}
return getAdminClient(peer.Endpoint, creds.AccessKey, creds.SecretKey)
}
// getAdminClientWithEndpoint - NOTE: ensure that at least a read lock is held on
// SiteReplicationSys before calling this.
func (c *SiteReplicationSys) getAdminClientWithEndpoint(ctx context.Context, deploymentID, endpoint string) (*madmin.AdminClient, error) {
creds, err := c.getPeerCreds()
if err != nil {
return nil, err
}
if _, ok := c.state.Peers[deploymentID]; !ok {
return nil, errSRPeerNotFound
}
return getAdminClient(endpoint, creds.AccessKey, creds.SecretKey)
}
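// getPeerCreds returns the credentials of the site-replication service
// account used to authenticate to peer clusters.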
func (c *SiteReplicationSys) getPeerCreds() (*auth.Credentials, error) {
u, ok := globalIAMSys.store.GetUser(c.state.ServiceAccountAccessKey)
if !ok {
return nil, errors.New("site replication service account not found")
}
return &u.Credentials, nil
}
// listBuckets returns a consistent common view of the latest unique buckets across
// sites; this is used for replication.
func (c *SiteReplicationSys) listBuckets(ctx context.Context) ([]BucketInfo, error) {
// If local has buckets, enable versioning on them, create them on peers
// and setup replication rules.
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errSRObjectLayerNotReady
}
return objAPI.ListBuckets(ctx, BucketOptions{Deleted: true})
}
// syncToAllPeers is used for syncing local data to all remote peers; it is
// called once during the initial "AddPeerClusters" request.
func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errSRObjectLayerNotReady
}
buckets, err := objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
return err
}
for _, bucketInfo := range buckets {
bucket := bucketInfo.Name
meta, err := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
if err != nil && !errors.Is(err, errConfigNotFound) {
return errSRBackendIssue(err)
}
opts := MakeBucketOptions{
LockEnabled: meta.ObjectLocking(),
CreatedAt: bucketInfo.Created.UTC(),
}
// Now call the MakeBucketHook on existing bucket - this will
// create buckets and replication rules on peer clusters.
if err = c.MakeBucketHook(ctx, bucket, opts); err != nil {
return errSRBucketConfigError(err)
}
// Replicate bucket policy if present.
policyJSON, tm := meta.PolicyConfigJSON, meta.PolicyConfigUpdatedAt
if len(policyJSON) > 0 {
err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: policyJSON,
UpdatedAt: tm,
})
if err != nil {
return errSRBucketMetaError(err)
}
}
// Replicate bucket tags if present.
tagCfg, tm := meta.TaggingConfigXML, meta.TaggingConfigUpdatedAt
if len(tagCfg) > 0 {
tagCfgStr := base64.StdEncoding.EncodeToString(tagCfg)
err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &tagCfgStr,
UpdatedAt: tm,
})
if err != nil {
return errSRBucketMetaError(err)
}
}
// Replicate object-lock config if present.
objLockCfgData, tm := meta.ObjectLockConfigXML, meta.ObjectLockConfigUpdatedAt
if len(objLockCfgData) > 0 {
objLockStr := base64.StdEncoding.EncodeToString(objLockCfgData)
err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
Tags: &objLockStr,
UpdatedAt: tm,
})
if err != nil {
return errSRBucketMetaError(err)
}
}
// Replicate bucket encryption settings if present.
sseConfigData, tm := meta.EncryptionConfigXML, meta.EncryptionConfigUpdatedAt
if len(sseConfigData) > 0 {
sseConfigStr := base64.StdEncoding.EncodeToString(sseConfigData)
err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &sseConfigStr,
UpdatedAt: tm,
})
if err != nil {
return errSRBucketMetaError(err)
}
}
quotaConfigJSON, tm := meta.QuotaConfigJSON, meta.QuotaConfigUpdatedAt
if len(quotaConfigJSON) > 0 {
err = c.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig,
Bucket: bucket,
Quota: quotaConfigJSON,
UpdatedAt: tm,
})
if err != nil {
return errSRBucketMetaError(err)
}
}
}
// From this point on, the order in which the information is
// synced to remote sites matters.
// Policies should be synced first.
{
// Replicate IAM policies on local to all peers.
allPolicyDocs, err := globalIAMSys.ListPolicyDocs(ctx, "")
if err != nil {
return errSRBackendIssue(err)
}
for pname, pdoc := range allPolicyDocs {
policyJSON, err := json.Marshal(pdoc.Policy)
if err != nil {
return wrapSRErr(err)
}
err = c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: pname,
Policy: policyJSON,
UpdatedAt: pdoc.UpdateDate,
})
if err != nil {
return errSRIAMError(err)
}
}
}
// Next should be userAccounts, i.e. local users; OIDC and LDAP deployments
// may not have any local users.
{
userAccounts := make(map[string]UserIdentity)
err := globalIAMSys.store.loadUsers(ctx, regUser, userAccounts)
if err != nil {
return errSRBackendIssue(err)
}
for _, acc := range userAccounts {
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: acc.Credentials.AccessKey,
IsDeleteReq: false,
UserReq: &madmin.AddOrUpdateUserReq{
SecretKey: acc.Credentials.SecretKey,
Status: madmin.AccountStatus(acc.Credentials.Status),
},
},
UpdatedAt: acc.UpdatedAt,
}); err != nil {
return errSRIAMError(err)
}
}
}
// Next should be Groups for some of these users; LDAP might have some Group
// DNs here.
{
groups := make(map[string]GroupInfo)
err := globalIAMSys.store.loadGroups(ctx, groups)
if err != nil {
return errSRBackendIssue(err)
}
for gname, group := range groups {
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: madmin.GroupAddRemove{
Group: gname,
Members: group.Members,
Status: madmin.GroupStatus(group.Status),
IsRemove: false,
},
},
UpdatedAt: group.UpdatedAt,
}); err != nil {
return errSRIAMError(err)
}
}
}
// Followed by group policy mapping
{
// Replicate policy mappings on local to all peers.
groupPolicyMap := make(map[string]MappedPolicy)
errG := globalIAMSys.store.loadMappedPolicies(ctx, unknownIAMUserType, true, groupPolicyMap)
if errG != nil {
return errSRBackendIssue(errG)
}
for group, mp := range groupPolicyMap {
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: group,
UserType: -1,
IsGroup: true,
Policy: mp.Policies,
},
UpdatedAt: mp.UpdatedAt,
})
if err != nil {
return errSRIAMError(err)
}
}
}
// Service accounts are the static accounts that should be synced with
// valid claims.
{
serviceAccounts := make(map[string]UserIdentity)
err := globalIAMSys.store.loadUsers(ctx, svcUser, serviceAccounts)
if err != nil {
return errSRBackendIssue(err)
}
for user, acc := range serviceAccounts {
if user == siteReplicatorSvcAcc {
// skip the site replicator svc account as it is
// already replicated.
continue
}
claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, acc.Credentials.AccessKey)
if err != nil {
return errSRBackendIssue(err)
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
if err != nil {
return errSRBackendIssue(err)
}
var policyJSON []byte
if policy != nil {
policyJSON, err = json.Marshal(policy)
if err != nil {
return wrapSRErr(err)
}
}
err = c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
Parent: acc.Credentials.ParentUser,
AccessKey: user,
SecretKey: acc.Credentials.SecretKey,
Groups: acc.Credentials.Groups,
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: acc.Credentials.Status,
Name: acc.Credentials.Name,
Description: acc.Credentials.Description,
Expiration: &acc.Credentials.Expiration,
},
},
UpdatedAt: acc.UpdatedAt,
})
if err != nil {
return errSRIAMError(err)
}
}
}
// Followed by policy mapping for the userAccounts we previously synced.
{
// Replicate policy mappings on local to all peers.
userPolicyMap := make(map[string]MappedPolicy)
errU := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
if errU != nil {
return errSRBackendIssue(errU)
}
for user, mp := range userPolicyMap {
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: user,
UserType: int(regUser),
IsGroup: false,
Policy: mp.Policies,
},
UpdatedAt: mp.UpdatedAt,
})
if err != nil {
return errSRIAMError(err)
}
}
}
// and finally followed by policy mappings for STS users.
{
// Replicate policy mappings on local to all peers.
stsPolicyMap := make(map[string]MappedPolicy)
errU := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, stsPolicyMap)
if errU != nil {
return errSRBackendIssue(errU)
}
for user, mp := range stsPolicyMap {
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: user,
UserType: int(stsUser),
IsGroup: false,
Policy: mp.Policies,
},
UpdatedAt: mp.UpdatedAt,
})
if err != nil {
return errSRIAMError(err)
}
}
}
return nil
}
// Concurrency helpers
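// concErr wraps the per-deployment errors collected from a concurrent
// operation together with a single summary error.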
type concErr struct {
errMap map[string]error
summaryErr error
}
func (c concErr) Error() string {
if c.summaryErr != nil {
return c.summaryErr.Error()
}
return "<nil>"
}
func (c concErr) Unwrap() error {
return c.summaryErr
}
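// toErrorFromErrMap flattens a map of deployment ID to error into a single
// error; it returns nil when every peer action succeeded.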
func (c *SiteReplicationSys) toErrorFromErrMap(errMap map[string]error, actionName string) error {
if len(errMap) == 0 {
return nil
}
var success int
msgs := []string{}
for d, err := range errMap {
name := c.state.Peers[d].Name
if err == nil {
msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): succeeded", actionName, name, d))
success++
} else {
msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): failed(%v)", actionName, name, d, err))
}
}
if success == len(errMap) {
return nil
}
return fmt.Errorf("Site replication error(s): \n%s", strings.Join(msgs, "\n"))
}
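// newConcErr builds a concErr from the given per-deployment error map and a
// summary error derived from it.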
func (c *SiteReplicationSys) newConcErr(errMap map[string]error, actionName string) error {
return concErr{
errMap: errMap,
summaryErr: c.toErrorFromErrMap(errMap, actionName),
}
}
// concDo calls actions concurrently. selfActionFn is run for the current
// cluster and peerActionFn is run for each peer replication cluster.
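// A typical invocation (mirroring IAMChangeHook above) replicates an item to
// every peer:
//
//  cerr := c.concDo(nil, func(d string, p madmin.PeerInfo) error {
//      admClient, err := c.getAdminClient(ctx, d)
//      if err != nil {
//          return wrapSRErr(err)
//      }
//      return admClient.SRPeerReplicateIAMItem(ctx, item)
//  }, replicateIAMItem)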
func (c *SiteReplicationSys) concDo(selfActionFn func() error, peerActionFn func(deploymentID string, p madmin.PeerInfo) error, actionName string) error {
depIDs := make([]string, 0, len(c.state.Peers))
for d := range c.state.Peers {
depIDs = append(depIDs, d)
}
errs := make([]error, len(c.state.Peers))
var wg sync.WaitGroup
wg.Add(len(depIDs))
for i := range depIDs {
go func(i int) {
defer wg.Done()
if depIDs[i] == globalDeploymentID {
if selfActionFn != nil {
errs[i] = selfActionFn()
}
} else {
errs[i] = peerActionFn(depIDs[i], c.state.Peers[depIDs[i]])
}
}(i)
}
wg.Wait()
errMap := make(map[string]error, len(c.state.Peers))
for i, depID := range depIDs {
errMap[depID] = errs[i]
}
return c.newConcErr(errMap, actionName)
}
func (c *SiteReplicationSys) annotateErr(annotation string, err error) error {
if err == nil {
return nil
}
return fmt.Errorf("%s: %s: %w", c.state.Name, annotation, err)
}
func (c *SiteReplicationSys) annotatePeerErr(dstPeer string, annotation string, err error) error {
if err == nil {
return nil
}
return fmt.Errorf("%s->%s: %s: %w", c.state.Name, dstPeer, annotation, err)
}
// isEnabled returns true if site replication is enabled
func (c *SiteReplicationSys) isEnabled() bool {
c.RLock()
defer c.RUnlock()
return c.enabled
}
var errMissingSRConfig = fmt.Errorf("unable to find site replication configuration")
// RemovePeerCluster - removes one or more clusters from site replication configuration.
func (c *SiteReplicationSys) RemovePeerCluster(ctx context.Context, objectAPI ObjectLayer, rreq madmin.SRRemoveReq) (st madmin.ReplicateRemoveStatus, err error) {
if !c.isEnabled() {
return st, errSRNotEnabled
}
info, err := c.GetClusterInfo(ctx)
if err != nil {
return st, errSRBackendIssue(err)
}
peerMap := make(map[string]madmin.PeerInfo)
var rmvEndpoints []string
siteNames := rreq.SiteNames
updatedPeers := make(map[string]madmin.PeerInfo)
for _, pi := range info.Sites {
updatedPeers[pi.DeploymentID] = pi
peerMap[pi.Name] = pi
if rreq.RemoveAll {
siteNames = append(siteNames, pi.Name)
}
}
for _, s := range siteNames {
pinfo, ok := peerMap[s]
if !ok {
return st, errSRConfigMissingError(errMissingSRConfig)
}
rmvEndpoints = append(rmvEndpoints, pinfo.Endpoint)
delete(updatedPeers, pinfo.DeploymentID)
}
var wg sync.WaitGroup
errs := make(map[string]error, len(c.state.Peers))
for _, v := range info.Sites {
wg.Add(1)
if v.DeploymentID == globalDeploymentID {
go func() {
defer wg.Done()
err := c.RemoveRemoteTargetsForEndpoint(ctx, objectAPI, rmvEndpoints, false)
errs[globalDeploymentID] = err
}()
continue
}
go func(pi madmin.PeerInfo) {
defer wg.Done()
admClient, err := c.getAdminClient(ctx, pi.DeploymentID)
if err != nil {
errs[pi.DeploymentID] = errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", pi.Name, err))
return
}
// set the requesting site's deploymentID for verification of peer request
rreq.RequestingDepID = globalDeploymentID
if _, err = admClient.SRPeerRemove(ctx, rreq); err != nil {
if errors.Is(err, errMissingSRConfig) {
// ignore if peer is already removed.
return
}
errs[pi.DeploymentID] = errSRPeerResp(fmt.Errorf("unable to update peer %s: %w", pi.Name, err))
return
}
}(v)
}
wg.Wait()
errdID := ""
selfTgtsDeleted := errs[globalDeploymentID] == nil // true if all remote targets and replication config cleared successfully on local cluster
for dID, err := range errs {
if err != nil {
if !rreq.RemoveAll && !selfTgtsDeleted {
return madmin.ReplicateRemoveStatus{
ErrDetail: err.Error(),
Status: madmin.ReplicateRemoveStatusPartial,
}, errSRPeerResp(fmt.Errorf("unable to update peer %s: %w", c.state.Peers[dID].Name, err))
}
errdID = dID
}
}
// Force the local config to be cleared even if peers failed, since the remote
// targets have by now been removed from the replication config and the user
// intended to forcibly clear all site replication.
if rreq.RemoveAll {
if err = c.removeFromDisk(ctx); err != nil {
return madmin.ReplicateRemoveStatus{
Status: madmin.ReplicateRemoveStatusPartial,
ErrDetail: fmt.Sprintf("unable to remove cluster-replication state on local: %v", err),
}, nil
}
if errdID != "" {
err := errs[errdID]
return madmin.ReplicateRemoveStatus{
Status: madmin.ReplicateRemoveStatusPartial,
ErrDetail: err.Error(),
}, nil
}
return madmin.ReplicateRemoveStatus{
Status: madmin.ReplicateRemoveStatusSuccess,
}, nil
}
// Update cluster state
var state srState
if len(updatedPeers) > 1 {
state = srState{
Name: info.Name,
Peers: updatedPeers,
ServiceAccountAccessKey: info.ServiceAccountAccessKey,
}
}
if err = c.saveToDisk(ctx, state); err != nil {
return madmin.ReplicateRemoveStatus{
Status: madmin.ReplicateRemoveStatusPartial,
ErrDetail: fmt.Sprintf("unable to save cluster-replication state on local: %v", err),
}, err
}
st = madmin.ReplicateRemoveStatus{
Status: madmin.ReplicateRemoveStatusSuccess,
}
if errs[errdID] != nil {
st.Status = madmin.ReplicateRemoveStatusPartial
st.ErrDetail = errs[errdID].Error()
}
return st, nil
}
// InternalRemoveReq - sends an unlink request to peer cluster to remove one or more sites
// from the site replication configuration.
func (c *SiteReplicationSys) InternalRemoveReq(ctx context.Context, objectAPI ObjectLayer, rreq madmin.SRRemoveReq) error {
if !c.isEnabled() {
return errSRNotEnabled
}
if rreq.RequestingDepID != "" {
// validate if requesting site is still part of site replication
var foundRequestor bool
for _, p := range c.state.Peers {
if p.DeploymentID == rreq.RequestingDepID {
foundRequestor = true
break
}
}
if !foundRequestor {
return errSRRequestorNotFound
}
}
ourName := ""
peerMap := make(map[string]madmin.PeerInfo)
updatedPeers := make(map[string]madmin.PeerInfo)
siteNames := rreq.SiteNames
for _, p := range c.state.Peers {
peerMap[p.Name] = p
if p.DeploymentID == globalDeploymentID {
ourName = p.Name
}
updatedPeers[p.DeploymentID] = p
if rreq.RemoveAll {
siteNames = append(siteNames, p.Name)
}
}
var rmvEndpoints []string
var unlinkSelf bool
for _, s := range siteNames {
info, ok := peerMap[s]
if !ok {
return errMissingSRConfig
}
if info.DeploymentID == globalDeploymentID {
unlinkSelf = true
continue
}
delete(updatedPeers, info.DeploymentID)
rmvEndpoints = append(rmvEndpoints, info.Endpoint)
}
if err := c.RemoveRemoteTargetsForEndpoint(ctx, objectAPI, rmvEndpoints, unlinkSelf); err != nil {
return err
}
var state srState
if !unlinkSelf {
state = srState{
Name: c.state.Name,
Peers: updatedPeers,
ServiceAccountAccessKey: c.state.ServiceAccountAccessKey,
}
}
if err := c.saveToDisk(ctx, state); err != nil {
return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
}
return nil
}
// RemoveRemoteTargetsForEndpoint removes replication targets corresponding to the given endpoints
func (c *SiteReplicationSys) RemoveRemoteTargetsForEndpoint(ctx context.Context, objectAPI ObjectLayer, endpoints []string, unlinkSelf bool) (err error) {
targets := globalBucketTargetSys.ListTargets(ctx, "", string(madmin.ReplicationService))
m := make(map[string]madmin.BucketTarget)
for _, t := range targets {
for _, endpoint := range endpoints {
ep, _ := url.Parse(endpoint)
if t.Endpoint == ep.Host &&
t.Secure == (ep.Scheme == "https") &&
t.Type == madmin.ReplicationService {
m[t.Arn] = t
}
}
// all remote targets from self are to be delinked
if unlinkSelf {
m[t.Arn] = t
}
}
buckets, err := objectAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
return errSRBackendIssue(err)
}
for _, b := range buckets {
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, b.Name)
if err != nil {
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: b.Name}) {
continue
}
return err
}
var nRules []sreplication.Rule
for _, r := range config.Rules {
if _, ok := m[r.Destination.Bucket]; !ok {
nRules = append(nRules, r)
}
}
if len(nRules) > 0 {
config.Rules = nRules
configData, err := xml.Marshal(config)
if err != nil {
return err
}
if _, err = globalBucketMetadataSys.Update(ctx, b.Name, bucketReplicationConfig, configData); err != nil {
return err
}
} else {
if _, err := globalBucketMetadataSys.Delete(ctx, b.Name, bucketReplicationConfig); err != nil {
return err
}
}
}
for arn, t := range m {
if err := globalBucketTargetSys.RemoveTarget(ctx, t.SourceBucket, arn); err != nil {
if errors.Is(err, BucketRemoteTargetNotFound{Bucket: t.SourceBucket}) {
continue
}
return err
}
targets, terr := globalBucketTargetSys.ListBucketTargets(ctx, t.SourceBucket)
if terr != nil {
return terr
}
tgtBytes, terr := json.Marshal(&targets)
if terr != nil {
return terr
}
if _, err = globalBucketMetadataSys.Update(ctx, t.SourceBucket, bucketTargetsFile, tgtBytes); err != nil {
return err
}
}
return
}
// Other helpers
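// getAdminClient returns a madmin client for the given endpoint, refusing to
// connect when the endpoint is currently marked offline.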
func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient, error) {
epURL, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
if globalBucketTargetSys.isOffline(epURL) {
return nil, RemoteTargetConnectionErr{Endpoint: epURL.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", epURL.String())}
}
client, err := madmin.New(epURL.Host, accessKey, secretKey, epURL.Scheme == "https")
if err != nil {
return nil, err
}
client.SetCustomTransport(globalRemoteTargetTransport)
return client, nil
}
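// getS3Client returns an S3 client for the given peer site, refusing to
// connect when the endpoint is currently marked offline.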
func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) {
ep, err := url.Parse(pc.Endpoint)
if err != nil {
return nil, err
}
if globalBucketTargetSys.isOffline(ep) {
return nil, RemoteTargetConnectionErr{Endpoint: ep.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", ep.String())}
}
return minioClient.New(ep.Host, &minioClient.Options{
Creds: credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""),
Secure: ep.Scheme == "https",
Transport: globalRemoteTargetTransport,
})
}
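// getPriorityHelper returns the priority to use for a new site-replication
// rule: the current maximum rule priority plus a gap of 10.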
func getPriorityHelper(replicationConfig replication.Config) int {
maxPrio := 0
for _, rule := range replicationConfig.Rules {
if rule.Priority > maxPrio {
maxPrio = rule.Priority
}
}
// leave some gaps in priority numbers for flexibility
return maxPrio + 10
}
// getMissingSiteNames returns a slice with site names participating in site replication
// but not specified while adding a new site.
func getMissingSiteNames(oldDeps, newDeps set.StringSet, currSites []madmin.PeerInfo) []string {
diff := oldDeps.Difference(newDeps)
var diffSlc []string
for _, v := range currSites {
if diff.Contains(v.DeploymentID) {
diffSlc = append(diffSlc, v.Name)
}
}
return diffSlc
}
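// The sr* wrapper types below pair madmin metadata with the deployment ID it
// was collected from, so entity configs can be compared across sites.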
type srBucketMetaInfo struct {
madmin.SRBucketInfo
DeploymentID string
}
type srPolicy struct {
madmin.SRIAMPolicy
DeploymentID string
}
type srPolicyMapping struct {
madmin.SRPolicyMapping
DeploymentID string
}
type srUserInfo struct {
madmin.UserInfo
DeploymentID string
}
type srGroupDesc struct {
madmin.GroupDesc
DeploymentID string
}
// SiteReplicationStatus returns the site replication status across clusters participating in site replication.
func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI ObjectLayer, opts madmin.SRStatusOptions) (info madmin.SRStatusInfo, err error) {
sinfo, err := c.siteReplicationStatus(ctx, objAPI, opts)
if err != nil {
return info, err
}
info = madmin.SRStatusInfo{
Enabled: sinfo.Enabled,
MaxBuckets: sinfo.MaxBuckets,
MaxUsers: sinfo.MaxUsers,
MaxGroups: sinfo.MaxGroups,
MaxPolicies: sinfo.MaxPolicies,
Sites: sinfo.Sites,
StatsSummary: sinfo.StatsSummary,
}
info.BucketStats = make(map[string]map[string]madmin.SRBucketStatsSummary, len(sinfo.Sites))
info.PolicyStats = make(map[string]map[string]madmin.SRPolicyStatsSummary)
info.UserStats = make(map[string]map[string]madmin.SRUserStatsSummary)
info.GroupStats = make(map[string]map[string]madmin.SRGroupStatsSummary)
numSites := len(info.Sites)
for b, stat := range sinfo.BucketStats {
for dID, st := range stat {
if st.TagMismatch ||
st.VersioningConfigMismatch ||
st.OLockConfigMismatch ||
st.SSEConfigMismatch ||
st.PolicyMismatch ||
st.ReplicationCfgMismatch ||
st.QuotaCfgMismatch ||
opts.Entity == madmin.SRBucketEntity {
if _, ok := info.BucketStats[b]; !ok {
info.BucketStats[b] = make(map[string]madmin.SRBucketStatsSummary, numSites)
}
info.BucketStats[b][dID] = st.SRBucketStatsSummary
}
}
}
for u, stat := range sinfo.UserStats {
for dID, st := range stat {
if st.PolicyMismatch || st.UserInfoMismatch || opts.Entity == madmin.SRUserEntity {
if _, ok := info.UserStats[u]; !ok {
info.UserStats[u] = make(map[string]madmin.SRUserStatsSummary, numSites)
}
info.UserStats[u][dID] = st.SRUserStatsSummary
}
}
}
for g, stat := range sinfo.GroupStats {
for dID, st := range stat {
if st.PolicyMismatch || st.GroupDescMismatch || opts.Entity == madmin.SRGroupEntity {
if _, ok := info.GroupStats[g]; !ok {
info.GroupStats[g] = make(map[string]madmin.SRGroupStatsSummary, numSites)
}
info.GroupStats[g][dID] = st.SRGroupStatsSummary
}
}
}
for p, stat := range sinfo.PolicyStats {
for dID, st := range stat {
if st.PolicyMismatch || opts.Entity == madmin.SRPolicyEntity {
if _, ok := info.PolicyStats[p]; !ok {
info.PolicyStats[p] = make(map[string]madmin.SRPolicyStatsSummary, numSites)
}
info.PolicyStats[p][dID] = st.SRPolicyStatsSummary
}
}
}
return
}
const (
replicationStatus = "ReplicationStatus"
)
// siteReplicationStatus returns the site replication status across clusters participating in site replication.
func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI ObjectLayer, opts madmin.SRStatusOptions) (info srStatusInfo, err error) {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return info, err
}
sris := make([]madmin.SRInfo, len(c.state.Peers))
depIdx := make(map[string]int, len(c.state.Peers))
i := 0
for d := range c.state.Peers {
depIdx[d] = i
i++
}
metaInfoConcErr := c.concDo(
func() error {
srInfo, err := c.SiteReplicationMetaInfo(ctx, objAPI, opts)
if err != nil {
return err
}
sris[depIdx[globalDeploymentID]] = srInfo
return nil
},
func(deploymentID string, p madmin.PeerInfo) error {
admClient, err := c.getAdminClient(ctx, deploymentID)
if err != nil {
return err
}
srInfo, err := admClient.SRMetaInfo(ctx, opts)
if err != nil {
return err
}
sris[depIdx[deploymentID]] = srInfo
return nil
},
replicationStatus,
)
if err := errors.Unwrap(metaInfoConcErr); err != nil {
return info, errSRBackendIssue(err)
}
info.Enabled = true
info.Sites = make(map[string]madmin.PeerInfo, len(c.state.Peers))
for d, peer := range c.state.Peers {
info.Sites[d] = peer
}
var maxBuckets int
for _, sri := range sris {
if len(sri.Buckets) > maxBuckets {
maxBuckets = len(sri.Buckets)
}
}
// mapping between entity and entity config across sites
bucketStats := make(map[string][]srBucketMetaInfo)
policyStats := make(map[string][]srPolicy)
userPolicyStats := make(map[string][]srPolicyMapping)
groupPolicyStats := make(map[string][]srPolicyMapping)
userInfoStats := make(map[string][]srUserInfo)
groupDescStats := make(map[string][]srGroupDesc)
numSites := len(sris)
allBuckets := set.NewStringSet() // across sites
allUsers := set.NewStringSet()
allUserWPolicies := set.NewStringSet()
allGroups := set.NewStringSet()
allGroupWPolicies := set.NewStringSet()
allPolicies := set.NewStringSet()
for _, sri := range sris {
for b := range sri.Buckets {
allBuckets.Add(b)
}
for u := range sri.UserInfoMap {
allUsers.Add(u)
}
for g := range sri.GroupDescMap {
allGroups.Add(g)
}
for p := range sri.Policies {
allPolicies.Add(p)
}
for u := range sri.UserPolicies {
allUserWPolicies.Add(u)
}
for g := range sri.GroupPolicies {
allGroupWPolicies.Add(g)
}
}
for i, sri := range sris {
for b := range allBuckets {
if _, ok := bucketStats[b]; !ok {
bucketStats[b] = make([]srBucketMetaInfo, numSites)
}
si, ok := sri.Buckets[b]
if !ok {
si = madmin.SRBucketInfo{Bucket: b}
}
bucketStats[b][i] = srBucketMetaInfo{SRBucketInfo: si, DeploymentID: sri.DeploymentID}
}
for pname := range allPolicies {
if _, ok := policyStats[pname]; !ok {
policyStats[pname] = make([]srPolicy, numSites)
}
// if pname is not present in the map, the zero value
// will be returned.
pi := sri.Policies[pname]
policyStats[pname][i] = srPolicy{SRIAMPolicy: pi, DeploymentID: sri.DeploymentID}
}
for user := range allUserWPolicies {
if _, ok := userPolicyStats[user]; !ok {
userPolicyStats[user] = make([]srPolicyMapping, numSites)
}
up := sri.UserPolicies[user]
userPolicyStats[user][i] = srPolicyMapping{SRPolicyMapping: up, DeploymentID: sri.DeploymentID}
}
for group := range allGroupWPolicies {
if _, ok := groupPolicyStats[group]; !ok {
groupPolicyStats[group] = make([]srPolicyMapping, numSites)
}
up := sri.GroupPolicies[group]
groupPolicyStats[group][i] = srPolicyMapping{SRPolicyMapping: up, DeploymentID: sri.DeploymentID}
}
for u := range allUsers {
if _, ok := userInfoStats[u]; !ok {
userInfoStats[u] = make([]srUserInfo, numSites)
}
ui := sri.UserInfoMap[u]
userInfoStats[u][i] = srUserInfo{UserInfo: ui, DeploymentID: sri.DeploymentID}
}
for g := range allGroups {
if _, ok := groupDescStats[g]; !ok {
groupDescStats[g] = make([]srGroupDesc, numSites)
}
gd := sri.GroupDescMap[g]
groupDescStats[g][i] = srGroupDesc{GroupDesc: gd, DeploymentID: sri.DeploymentID}
}
}
info.StatsSummary = make(map[string]madmin.SRSiteSummary, len(c.state.Peers))
info.BucketStats = make(map[string]map[string]srBucketStatsSummary)
info.PolicyStats = make(map[string]map[string]srPolicyStatsSummary)
info.UserStats = make(map[string]map[string]srUserStatsSummary)
info.GroupStats = make(map[string]map[string]srGroupStatsSummary)
// collect user policy mapping replication status across sites
if opts.Users || opts.Entity == madmin.SRUserEntity {
for u, pslc := range userPolicyStats {
if len(info.UserStats[u]) == 0 {
info.UserStats[u] = make(map[string]srUserStatsSummary)
}
var policyMappings []madmin.SRPolicyMapping
uPolicyCount := 0
for _, ps := range pslc {
policyMappings = append(policyMappings, ps.SRPolicyMapping)
uPolicyCount++
sum := info.StatsSummary[ps.DeploymentID]
sum.TotalUserPolicyMappingCount++
info.StatsSummary[ps.DeploymentID] = sum
}
userPolicyMismatch := !isPolicyMappingReplicated(uPolicyCount, numSites, policyMappings)
for _, ps := range pslc {
dID := depIdx[ps.DeploymentID]
_, hasUser := sris[dID].UserPolicies[u]
info.UserStats[u][ps.DeploymentID] = srUserStatsSummary{
SRUserStatsSummary: madmin.SRUserStatsSummary{
PolicyMismatch: userPolicyMismatch,
HasUser: hasUser,
HasPolicyMapping: ps.Policy != "",
},
userPolicy: ps,
}
if !userPolicyMismatch || opts.Entity != madmin.SRUserEntity {
sum := info.StatsSummary[ps.DeploymentID]
if !ps.IsGroup {
sum.ReplicatedUserPolicyMappings++
}
info.StatsSummary[ps.DeploymentID] = sum
}
}
}
// collect user info replication status across sites
for u, pslc := range userInfoStats {
var uiSlc []madmin.UserInfo
userCount := 0
for _, ps := range pslc {
uiSlc = append(uiSlc, ps.UserInfo)
userCount++
sum := info.StatsSummary[ps.DeploymentID]
sum.TotalUsersCount++
info.StatsSummary[ps.DeploymentID] = sum
}
userInfoMismatch := !isUserInfoReplicated(userCount, numSites, uiSlc)
for _, ps := range pslc {
dID := depIdx[ps.DeploymentID]
_, hasUser := sris[dID].UserInfoMap[u]
if len(info.UserStats[u]) == 0 {
info.UserStats[u] = make(map[string]srUserStatsSummary)
}
umis, ok := info.UserStats[u][ps.DeploymentID]
if !ok {
umis = srUserStatsSummary{
SRUserStatsSummary: madmin.SRUserStatsSummary{
HasUser: hasUser,
},
}
}
umis.UserInfoMismatch = userInfoMismatch
umis.userInfo = ps
info.UserStats[u][ps.DeploymentID] = umis
if !userInfoMismatch || opts.Entity != madmin.SRUserEntity {
sum := info.StatsSummary[ps.DeploymentID]
sum.ReplicatedUsers++
info.StatsSummary[ps.DeploymentID] = sum
}
}
}
}
if opts.Groups || opts.Entity == madmin.SRGroupEntity {
// collect group policy mapping replication status across sites
for g, pslc := range groupPolicyStats {
var policyMappings []madmin.SRPolicyMapping
gPolicyCount := 0
for _, ps := range pslc {
policyMappings = append(policyMappings, ps.SRPolicyMapping)
gPolicyCount++
sum := info.StatsSummary[ps.DeploymentID]
sum.TotalGroupPolicyMappingCount++
info.StatsSummary[ps.DeploymentID] = sum
}
groupPolicyMismatch := !isPolicyMappingReplicated(gPolicyCount, numSites, policyMappings)
if len(info.GroupStats[g]) == 0 {
info.GroupStats[g] = make(map[string]srGroupStatsSummary)
}
for _, ps := range pslc {
dID := depIdx[ps.DeploymentID]
_, hasGroup := sris[dID].GroupPolicies[g]
info.GroupStats[g][ps.DeploymentID] = srGroupStatsSummary{
SRGroupStatsSummary: madmin.SRGroupStatsSummary{
PolicyMismatch: groupPolicyMismatch,
HasGroup: hasGroup,
HasPolicyMapping: ps.Policy != "",
DeploymentID: ps.DeploymentID,
},
groupPolicy: ps,
}
if !groupPolicyMismatch && opts.Entity != madmin.SRGroupEntity {
sum := info.StatsSummary[ps.DeploymentID]
sum.ReplicatedGroupPolicyMappings++
info.StatsSummary[ps.DeploymentID] = sum
}
}
}
// collect group desc replication status across sites
for g, pslc := range groupDescStats {
var gds []madmin.GroupDesc
groupCount := 0
for _, ps := range pslc {
groupCount++
sum := info.StatsSummary[ps.DeploymentID]
sum.TotalGroupsCount++
info.StatsSummary[ps.DeploymentID] = sum
gds = append(gds, ps.GroupDesc)
}
gdMismatch := !isGroupDescReplicated(groupCount, numSites, gds)
for _, ps := range pslc {
dID := depIdx[ps.DeploymentID]
_, hasGroup := sris[dID].GroupDescMap[g]
if len(info.GroupStats[g]) == 0 {
info.GroupStats[g] = make(map[string]srGroupStatsSummary)
}
gmis, ok := info.GroupStats[g][ps.DeploymentID]
if !ok {
gmis = srGroupStatsSummary{
SRGroupStatsSummary: madmin.SRGroupStatsSummary{
HasGroup: hasGroup,
},
}
}
gmis.GroupDescMismatch = gdMismatch
gmis.groupDesc = ps
info.GroupStats[g][ps.DeploymentID] = gmis
if !gdMismatch && opts.Entity != madmin.SRGroupEntity {
sum := info.StatsSummary[ps.DeploymentID]
sum.ReplicatedGroups++
info.StatsSummary[ps.DeploymentID] = sum
}
}
}
}
if opts.Policies || opts.Entity == madmin.SRPolicyEntity {
// collect IAM policy replication status across sites
for p, pslc := range policyStats {
var policies []*iampolicy.Policy
uPolicyCount := 0
for _, ps := range pslc {
plcy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(ps.SRIAMPolicy.Policy)))
if err != nil {
continue
}
policies = append(policies, plcy)
uPolicyCount++
sum := info.StatsSummary[ps.DeploymentID]
sum.TotalIAMPoliciesCount++
info.StatsSummary[ps.DeploymentID] = sum
}
if len(info.PolicyStats[p]) == 0 {
info.PolicyStats[p] = make(map[string]srPolicyStatsSummary)
}
policyMismatch := !isIAMPolicyReplicated(uPolicyCount, numSites, policies)
for _, ps := range pslc {
dID := depIdx[ps.DeploymentID]
_, hasPolicy := sris[dID].Policies[p]
info.PolicyStats[p][ps.DeploymentID] = srPolicyStatsSummary{
SRPolicyStatsSummary: madmin.SRPolicyStatsSummary{
PolicyMismatch: policyMismatch,
HasPolicy: hasPolicy,
},
policy: ps,
}
switch {
case policyMismatch, opts.Entity == madmin.SRPolicyEntity:
default:
sum := info.StatsSummary[ps.DeploymentID]
if !policyMismatch {
sum.ReplicatedIAMPolicies++
}
info.StatsSummary[ps.DeploymentID] = sum
}
}
}
}
if opts.Buckets || opts.Entity == madmin.SRBucketEntity {
// collect bucket metadata replication stats across sites
for b, slc := range bucketStats {
tagSet := set.NewStringSet()
olockConfigSet := set.NewStringSet()
policies := make([]*bktpolicy.Policy, numSites)
replCfgs := make([]*sreplication.Config, numSites)
quotaCfgs := make([]*madmin.BucketQuota, numSites)
sseCfgSet := set.NewStringSet()
versionCfgSet := set.NewStringSet()
var tagCount, olockCfgCount, sseCfgCount, versionCfgCount int
for i, s := range slc {
if s.ReplicationConfig != nil {
cfgBytes, err := base64.StdEncoding.DecodeString(*s.ReplicationConfig)
if err != nil {
continue
}
cfg, err := sreplication.ParseConfig(bytes.NewReader(cfgBytes))
if err != nil {
continue
}
replCfgs[i] = cfg
}
if s.Versioning != nil {
configData, err := base64.StdEncoding.DecodeString(*s.Versioning)
if err != nil {
continue
}
versionCfgCount++
if !versionCfgSet.Contains(string(configData)) {
versionCfgSet.Add(string(configData))
}
}
if s.QuotaConfig != nil {
cfgBytes, err := base64.StdEncoding.DecodeString(*s.QuotaConfig)
if err != nil {
continue
}
cfg, err := parseBucketQuota(b, cfgBytes)
if err != nil {
continue
}
quotaCfgs[i] = cfg
}
if s.Tags != nil {
tagBytes, err := base64.StdEncoding.DecodeString(*s.Tags)
if err != nil {
continue
}
tagCount++
if !tagSet.Contains(string(tagBytes)) {
tagSet.Add(string(tagBytes))
}
}
if len(s.Policy) > 0 {
plcy, err := bktpolicy.ParseConfig(bytes.NewReader(s.Policy), b)
if err != nil {
continue
}
policies[i] = plcy
}
if s.ObjectLockConfig != nil {
configData, err := base64.StdEncoding.DecodeString(*s.ObjectLockConfig)
if err != nil {
continue
}
olockCfgCount++
if !olockConfigSet.Contains(string(configData)) {
olockConfigSet.Add(string(configData))
}
}
if s.SSEConfig != nil {
configData, err := base64.StdEncoding.DecodeString(*s.SSEConfig)
if err != nil {
continue
}
sseCfgCount++
if !sseCfgSet.Contains(string(configData)) {
sseCfgSet.Add(string(configData))
}
}
ss, ok := info.StatsSummary[s.DeploymentID]
if !ok {
ss = madmin.SRSiteSummary{}
}
// increment total number of replicated buckets
if len(slc) == numSites {
ss.ReplicatedBuckets++
}
ss.TotalBucketsCount++
if tagCount > 0 {
ss.TotalTagsCount++
}
if olockCfgCount > 0 {
ss.TotalLockConfigCount++
}
if sseCfgCount > 0 {
ss.TotalSSEConfigCount++
}
if versionCfgCount > 0 {
ss.TotalVersioningConfigCount++
}
if len(policies) > 0 {
ss.TotalBucketPoliciesCount++
}
info.StatsSummary[s.DeploymentID] = ss
}
tagMismatch := !isReplicated(tagCount, numSites, tagSet)
olockCfgMismatch := !isReplicated(olockCfgCount, numSites, olockConfigSet)
sseCfgMismatch := !isReplicated(sseCfgCount, numSites, sseCfgSet)
versionCfgMismatch := !isReplicated(versionCfgCount, numSites, versionCfgSet)
policyMismatch := !isBktPolicyReplicated(numSites, policies)
replCfgMismatch := !isBktReplCfgReplicated(numSites, replCfgs)
quotaCfgMismatch := !isBktQuotaCfgReplicated(numSites, quotaCfgs)
info.BucketStats[b] = make(map[string]srBucketStatsSummary, numSites)
for i, s := range slc {
dIdx := depIdx[s.DeploymentID]
var hasBucket, isBucketMarkedDeleted bool
bi, ok := sris[dIdx].Buckets[s.Bucket]
if ok {
isBucketMarkedDeleted = !bi.DeletedAt.IsZero() && (bi.CreatedAt.IsZero() || bi.DeletedAt.After(bi.CreatedAt))
hasBucket = !bi.CreatedAt.IsZero()
}
quotaCfgSet := hasBucket && quotaCfgs[i] != nil && *quotaCfgs[i] != madmin.BucketQuota{}
ss := madmin.SRBucketStatsSummary{
DeploymentID: s.DeploymentID,
HasBucket: hasBucket,
BucketMarkedDeleted: isBucketMarkedDeleted,
TagMismatch: tagMismatch,
OLockConfigMismatch: olockCfgMismatch,
SSEConfigMismatch: sseCfgMismatch,
VersioningConfigMismatch: versionCfgMismatch,
PolicyMismatch: policyMismatch,
ReplicationCfgMismatch: replCfgMismatch,
QuotaCfgMismatch: quotaCfgMismatch,
HasReplicationCfg: s.ReplicationConfig != nil,
HasTagsSet: s.Tags != nil,
HasOLockConfigSet: s.ObjectLockConfig != nil,
HasPolicySet: s.Policy != nil,
HasQuotaCfgSet: quotaCfgSet,
HasSSECfgSet: s.SSEConfig != nil,
}
var m srBucketMetaInfo
if len(bucketStats[s.Bucket]) > dIdx {
m = bucketStats[s.Bucket][dIdx]
}
info.BucketStats[b][s.DeploymentID] = srBucketStatsSummary{
SRBucketStatsSummary: ss,
meta: m,
}
}
// update replicated counts for configs with no mismatch across all sites
for _, s := range slc {
sum := info.StatsSummary[s.DeploymentID]
if !olockCfgMismatch && olockCfgCount == numSites {
sum.ReplicatedLockConfig++
}
if !versionCfgMismatch && versionCfgCount == numSites {
sum.ReplicatedVersioningConfig++
}
if !sseCfgMismatch && sseCfgCount == numSites {
sum.ReplicatedSSEConfig++
}
if !policyMismatch && len(policies) == numSites {
sum.ReplicatedBucketPolicies++
}
if !tagMismatch && tagCount == numSites {
sum.ReplicatedTags++
}
info.StatsSummary[s.DeploymentID] = sum
}
}
}
// maximum number of buckets, users etc. seen across sites
info.MaxBuckets = len(bucketStats)
info.MaxUsers = len(userInfoStats)
info.MaxGroups = len(groupDescStats)
info.MaxPolicies = len(policyStats)
return
}
// isReplicated returns true if the replicated count matches the number of
// sites and there is at most one unique entry in the set.
func isReplicated(cntReplicated, total int, valSet set.StringSet) bool {
if cntReplicated > 0 && cntReplicated < total {
return false
}
if len(valSet) > 1 {
// mismatch - one or more sites have differing tags/policy
return false
}
return true
}
// isIAMPolicyReplicated returns true if count of replicated IAM policies matches total
// number of sites and IAM policies are identical.
func isIAMPolicyReplicated(cntReplicated, total int, policies []*iampolicy.Policy) bool {
if cntReplicated > 0 && cntReplicated != total {
return false
}
// check if policies match between sites
var prev *iampolicy.Policy
for i, p := range policies {
if i == 0 {
prev = p
continue
}
if !prev.Equals(*p) {
return false
}
}
return true
}
// isPolicyMappingReplicated returns true if count of replicated IAM policy mappings matches total
// number of sites and IAM policy mappings are identical.
func isPolicyMappingReplicated(cntReplicated, total int, policies []madmin.SRPolicyMapping) bool {
if cntReplicated > 0 && cntReplicated != total {
return false
}
// check if policy mappings match between sites
var prev madmin.SRPolicyMapping
for i, p := range policies {
if i == 0 {
prev = p
continue
}
if prev.IsGroup != p.IsGroup ||
prev.Policy != p.Policy ||
prev.UserOrGroup != p.UserOrGroup {
return false
}
}
return true
}
func isUserInfoReplicated(cntReplicated, total int, uis []madmin.UserInfo) bool {
if cntReplicated > 0 && cntReplicated != total {
return false
}
// check if user infos match between sites
var prev madmin.UserInfo
for i, ui := range uis {
if i == 0 {
prev = ui
continue
}
if !isUserInfoEqual(prev, ui) {
return false
}
}
return true
}
func isGroupDescReplicated(cntReplicated, total int, gds []madmin.GroupDesc) bool {
if cntReplicated > 0 && cntReplicated != total {
return false
}
// check if group descriptions match between sites
var prev madmin.GroupDesc
for i, gd := range gds {
if i == 0 {
prev = gd
continue
}
if !isGroupDescEqual(prev, gd) {
return false
}
}
return true
}
func isBktQuotaCfgReplicated(total int, quotaCfgs []*madmin.BucketQuota) bool {
numquotaCfgs := 0
for _, q := range quotaCfgs {
if q == nil {
continue
}
numquotaCfgs++
}
if numquotaCfgs == 0 {
return true
}
if numquotaCfgs > 0 && numquotaCfgs != total {
return false
}
var prev *madmin.BucketQuota
for i, q := range quotaCfgs {
if q == nil {
return false
}
if i == 0 {
prev = q
continue
}
if prev.Quota != q.Quota || prev.Type != q.Type {
return false
}
}
return true
}
// isBktPolicyReplicated returns true if count of replicated bucket policies matches total
// number of sites and bucket policies are identical.
func isBktPolicyReplicated(total int, policies []*bktpolicy.Policy) bool {
numPolicies := 0
for _, p := range policies {
if p == nil {
continue
}
numPolicies++
}
if numPolicies > 0 && numPolicies != total {
return false
}
// check if policies match between sites
var prev *bktpolicy.Policy
for i, p := range policies {
if p == nil {
continue
}
if i == 0 {
prev = p
continue
}
if !prev.Equals(*p) {
return false
}
}
return true
}
// isBktReplCfgReplicated returns true if all the sites have same number
// of replication rules with all replication features enabled.
func isBktReplCfgReplicated(total int, cfgs []*sreplication.Config) bool {
cntReplicated := 0
for _, c := range cfgs {
if c == nil {
continue
}
cntReplicated++
}
if cntReplicated > 0 && cntReplicated != total {
return false
}
// check if replication configs match between sites
var prev *sreplication.Config
for i, c := range cfgs {
if c == nil {
continue
}
if i == 0 {
prev = c
continue
}
if len(prev.Rules) != len(c.Rules) {
return false
}
if len(c.Rules) != total-1 {
return false
}
for _, r := range c.Rules {
if !strings.HasPrefix(r.ID, "site-repl-") {
return false
}
if r.DeleteMarkerReplication.Status == sreplication.Disabled ||
r.DeleteReplication.Status == sreplication.Disabled ||
r.ExistingObjectReplication.Status == sreplication.Disabled ||
r.SourceSelectionCriteria.ReplicaModifications.Status == sreplication.Disabled {
return false
}
}
}
return true
}
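// Hedged helper sketch mirroring the checks above (names are illustrative, not
// part of the original code): a rule created by site replication is expected to
// use the "site-repl-" ID prefix, and each site carries one rule per remote peer.
func exampleSiteReplRuleLayout(ruleID string, numRules, totalSites int) bool {
	return strings.HasPrefix(ruleID, "site-repl-") && numRules == totalSites-1
}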
// cache of IAM info fetched in last SiteReplicationMetaInfo call
type srIAMCache struct {
sync.RWMutex
lastUpdate time.Time
srIAMInfo madmin.SRInfo // caches IAM info
}
func (c *SiteReplicationSys) getSRCachedIAMInfo() (info madmin.SRInfo, ok bool) {
c.iamMetaCache.RLock()
defer c.iamMetaCache.RUnlock()
if c.iamMetaCache.lastUpdate.IsZero() {
return info, false
}
if time.Since(c.iamMetaCache.lastUpdate) < siteHealTimeInterval {
return c.iamMetaCache.srIAMInfo, true
}
return info, false
}
func (c *SiteReplicationSys) srCacheIAMInfo(info madmin.SRInfo) {
c.iamMetaCache.Lock()
defer c.iamMetaCache.Unlock()
c.iamMetaCache.srIAMInfo = info
c.iamMetaCache.lastUpdate = time.Now()
}
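// Note on caching (illustrative summary, not original doc text): a full IAM query
// (users, groups and policies requested together, without buckets) is answered from
// iamMetaCache for up to one siteHealTimeInterval before being rebuilt from disk.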
// SiteReplicationMetaInfo returns the metadata info on buckets, policies etc for the replicated site
func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI ObjectLayer, opts madmin.SRStatusOptions) (info madmin.SRInfo, err error) {
if objAPI == nil {
return info, errSRObjectLayerNotReady
}
c.RLock()
defer c.RUnlock()
if !c.enabled {
return info, nil
}
info.DeploymentID = globalDeploymentID
if opts.Buckets || opts.Entity == madmin.SRBucketEntity {
var (
buckets []BucketInfo
err error
)
if opts.Entity == madmin.SRBucketEntity {
bi, err := objAPI.GetBucketInfo(ctx, opts.EntityValue, BucketOptions{Deleted: opts.ShowDeleted})
if err != nil {
if isErrBucketNotFound(err) {
return info, nil
}
return info, errSRBackendIssue(err)
}
buckets = append(buckets, bi)
} else {
buckets, err = objAPI.ListBuckets(ctx, BucketOptions{Deleted: opts.ShowDeleted})
if err != nil {
return info, errSRBackendIssue(err)
}
}
info.Buckets = make(map[string]madmin.SRBucketInfo, len(buckets))
for _, bucketInfo := range buckets {
bucket := bucketInfo.Name
bucketExists := bucketInfo.Deleted.IsZero() || (!bucketInfo.Created.IsZero() && bucketInfo.Created.After(bucketInfo.Deleted))
bms := madmin.SRBucketInfo{
Bucket: bucket,
CreatedAt: bucketInfo.Created.UTC(),
DeletedAt: bucketInfo.Deleted.UTC(),
}
if !bucketExists {
info.Buckets[bucket] = bms
continue
}
meta, err := globalBucketMetadataSys.GetConfigFromDisk(ctx, bucket)
if err != nil && !errors.Is(err, errConfigNotFound) {
return info, errSRBackendIssue(err)
}
bms.Policy = meta.PolicyConfigJSON
bms.PolicyUpdatedAt = meta.PolicyConfigUpdatedAt
if len(meta.TaggingConfigXML) > 0 {
tagCfgStr := base64.StdEncoding.EncodeToString(meta.TaggingConfigXML)
bms.Tags = &tagCfgStr
bms.TagConfigUpdatedAt = meta.TaggingConfigUpdatedAt
}
if len(meta.VersioningConfigXML) > 0 {
versioningCfgStr := base64.StdEncoding.EncodeToString(meta.VersioningConfigXML)
bms.Versioning = &versioningCfgStr
bms.VersioningConfigUpdatedAt = meta.VersioningConfigUpdatedAt
}
if len(meta.ObjectLockConfigXML) > 0 {
objLockStr := base64.StdEncoding.EncodeToString(meta.ObjectLockConfigXML)
bms.ObjectLockConfig = &objLockStr
bms.ObjectLockConfigUpdatedAt = meta.ObjectLockConfigUpdatedAt
}
if len(meta.QuotaConfigJSON) > 0 {
quotaConfigStr := base64.StdEncoding.EncodeToString(meta.QuotaConfigJSON)
bms.QuotaConfig = &quotaConfigStr
bms.QuotaConfigUpdatedAt = meta.QuotaConfigUpdatedAt
}
if len(meta.EncryptionConfigXML) > 0 {
sseConfigStr := base64.StdEncoding.EncodeToString(meta.EncryptionConfigXML)
bms.SSEConfig = &sseConfigStr
bms.SSEConfigUpdatedAt = meta.EncryptionConfigUpdatedAt
}
if len(meta.ReplicationConfigXML) > 0 {
rcfgXMLStr := base64.StdEncoding.EncodeToString(meta.ReplicationConfigXML)
bms.ReplicationConfig = &rcfgXMLStr
bms.ReplicationConfigUpdatedAt = meta.ReplicationConfigUpdatedAt
}
info.Buckets[bucket] = bms
}
}
if opts.Users && opts.Groups && opts.Policies && !opts.Buckets {
// serialize SiteReplicationMetaInfo calls - if the cached data is within the
// healing interval, avoid fetching IAM data again from disk.
if metaInfo, ok := c.getSRCachedIAMInfo(); ok {
return metaInfo, nil
}
}
if opts.Policies || opts.Entity == madmin.SRPolicyEntity {
var allPolicies map[string]PolicyDoc
if opts.Entity == madmin.SRPolicyEntity {
if p, err := globalIAMSys.store.GetPolicyDoc(opts.EntityValue); err == nil {
allPolicies = map[string]PolicyDoc{opts.EntityValue: p}
}
} else {
// Replicate IAM policies on local to all peers.
allPolicies, err = globalIAMSys.store.listPolicyDocs(ctx, "")
if err != nil {
return info, errSRBackendIssue(err)
}
}
info.Policies = make(map[string]madmin.SRIAMPolicy, len(allPolicies))
for pname, policyDoc := range allPolicies {
policyJSON, err := json.Marshal(policyDoc.Policy)
if err != nil {
return info, wrapSRErr(err)
}
info.Policies[pname] = madmin.SRIAMPolicy{Policy: json.RawMessage(policyJSON), UpdatedAt: policyDoc.UpdateDate}
}
}
if opts.Users || opts.Entity == madmin.SRUserEntity {
// Replicate policy mappings on local to all peers.
userPolicyMap := make(map[string]MappedPolicy)
if opts.Entity == madmin.SRUserEntity {
if mp, ok := globalIAMSys.store.GetMappedPolicy(opts.EntityValue, false); ok {
userPolicyMap[opts.EntityValue] = mp
}
} else {
stsErr := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
if stsErr != nil {
return info, errSRBackendIssue(stsErr)
}
usrErr := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
if usrErr != nil {
return info, errSRBackendIssue(usrErr)
}
svcErr := globalIAMSys.store.loadMappedPolicies(ctx, svcUser, false, userPolicyMap)
if svcErr != nil {
return info, errSRBackendIssue(svcErr)
}
}
info.UserPolicies = make(map[string]madmin.SRPolicyMapping, len(userPolicyMap))
for user, mp := range userPolicyMap {
info.UserPolicies[user] = madmin.SRPolicyMapping{
IsGroup: false,
UserOrGroup: user,
Policy: mp.Policies,
UpdatedAt: mp.UpdatedAt,
}
}
info.UserInfoMap = make(map[string]madmin.UserInfo)
if opts.Entity == madmin.SRUserEntity {
if ui, err := globalIAMSys.GetUserInfo(ctx, opts.EntityValue); err == nil {
info.UserInfoMap[opts.EntityValue] = ui
}
} else {
userAccounts := make(map[string]UserIdentity)
uerr := globalIAMSys.store.loadUsers(ctx, regUser, userAccounts)
if uerr != nil {
return info, errSRBackendIssue(uerr)
}
serr := globalIAMSys.store.loadUsers(ctx, svcUser, userAccounts)
if serr != nil {
return info, errSRBackendIssue(serr)
}
terr := globalIAMSys.store.loadUsers(ctx, stsUser, userAccounts)
if terr != nil {
return info, errSRBackendIssue(terr)
}
for k, v := range userAccounts {
if k == siteReplicatorSvcAcc {
// skip the site replicate svc account as it is
// already replicated.
continue
}
if v.Credentials.ParentUser != "" && v.Credentials.ParentUser == globalActiveCred.AccessKey {
// skip all root user service accounts.
continue
}
info.UserInfoMap[k] = madmin.UserInfo{
Status: madmin.AccountStatus(v.Credentials.Status),
}
}
}
}
if opts.Groups || opts.Entity == madmin.SRGroupEntity {
// Replicate policy mappings on local to all peers.
groupPolicyMap := make(map[string]MappedPolicy)
if opts.Entity == madmin.SRGroupEntity {
if mp, ok := globalIAMSys.store.GetMappedPolicy(opts.EntityValue, true); ok {
groupPolicyMap[opts.EntityValue] = mp
}
} else {
stsErr := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
if stsErr != nil {
return info, errSRBackendIssue(stsErr)
}
userErr := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap)
if userErr != nil {
return info, errSRBackendIssue(userErr)
}
}
info.GroupPolicies = make(map[string]madmin.SRPolicyMapping, len(c.state.Peers))
for group, mp := range groupPolicyMap {
info.GroupPolicies[group] = madmin.SRPolicyMapping{
IsGroup: true,
UserOrGroup: group,
Policy: mp.Policies,
UpdatedAt: mp.UpdatedAt,
}
}
info.GroupDescMap = make(map[string]madmin.GroupDesc)
if opts.Entity == madmin.SRGroupEntity {
if gd, err := globalIAMSys.GetGroupDescription(opts.EntityValue); err == nil {
info.GroupDescMap[opts.EntityValue] = gd
}
} else {
// get users/group info on local.
groups, errG := globalIAMSys.store.listGroups(ctx)
if errG != nil {
return info, errSRBackendIssue(errG)
}
groupDescMap := make(map[string]madmin.GroupDesc, len(groups))
for _, g := range groups {
groupDescMap[g], errG = globalIAMSys.GetGroupDescription(g)
if errG != nil {
return info, errSRBackendIssue(errG)
}
}
for group, d := range groupDescMap {
info.GroupDescMap[group] = d
}
}
}
// cache SR metadata info for IAM
if opts.Users && opts.Groups && opts.Policies && !opts.Buckets {
c.srCacheIAMInfo(info)
}
return info, nil
}
// EditPeerCluster - edits replication configuration and updates peer endpoint.
func (c *SiteReplicationSys) EditPeerCluster(ctx context.Context, peer madmin.PeerInfo) (madmin.ReplicateEditStatus, error) {
sites, err := c.GetClusterInfo(ctx)
if err != nil {
return madmin.ReplicateEditStatus{}, errSRBackendIssue(err)
}
if !sites.Enabled {
return madmin.ReplicateEditStatus{}, errSRNotEnabled
}
var (
found bool
admClient *madmin.AdminClient
)
if globalDeploymentID == peer.DeploymentID && !peer.SyncState.Empty() {
return madmin.ReplicateEditStatus{}, errSRInvalidRequest(fmt.Errorf("A peer cluster, rather than the local cluster (endpoint=%s, deployment-id=%s), needs to be specified when setting the 'sync' replication mode", peer.Endpoint, peer.DeploymentID))
}
for _, v := range sites.Sites {
if peer.DeploymentID == v.DeploymentID {
found = true
if !peer.SyncState.Empty() && peer.Endpoint == "" { // peer.Endpoint may be "" if only sync state is being updated
break
}
if peer.Endpoint == v.Endpoint && peer.SyncState.Empty() {
return madmin.ReplicateEditStatus{}, errSRInvalidRequest(fmt.Errorf("Endpoint %s entered for deployment id %s already configured in site replication", v.Endpoint, v.DeploymentID))
}
admClient, err = c.getAdminClientWithEndpoint(ctx, v.DeploymentID, peer.Endpoint)
if err != nil {
return madmin.ReplicateEditStatus{}, errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
}
// check if endpoint is reachable
info, err := admClient.ServerInfo(ctx)
if err != nil {
return madmin.ReplicateEditStatus{}, errSRInvalidRequest(fmt.Errorf("Endpoint %s not reachable: %w", peer.Endpoint, err))
}
if info.DeploymentID != v.DeploymentID {
return madmin.ReplicateEditStatus{}, errSRInvalidRequest(fmt.Errorf("Endpoint %s does not belong to deployment expected: %s (found %s) ", v.Endpoint, v.DeploymentID, info.DeploymentID))
}
}
}
if !found {
return madmin.ReplicateEditStatus{}, errSRInvalidRequest(fmt.Errorf("%s not found in existing replicated sites", peer.DeploymentID))
}
pi := c.state.Peers[peer.DeploymentID]
prevPeerInfo := pi
if !peer.SyncState.Empty() { // update replication to peer to be sync/async
pi.SyncState = peer.SyncState
c.state.Peers[peer.DeploymentID] = pi
}
if peer.Endpoint != "" { // `admin replicate update` requested an endpoint change
pi.Endpoint = peer.Endpoint
}
if admClient != nil {
errs := make(map[string]error, len(c.state.Peers))
var wg sync.WaitGroup
for i, v := range sites.Sites {
if v.DeploymentID == globalDeploymentID {
c.state.Peers[peer.DeploymentID] = pi
continue
}
wg.Add(1)
go func(pi madmin.PeerInfo, i int) {
defer wg.Done()
v := sites.Sites[i]
admClient, err := c.getAdminClient(ctx, v.DeploymentID)
if v.DeploymentID == peer.DeploymentID {
admClient, err = c.getAdminClientWithEndpoint(ctx, v.DeploymentID, peer.Endpoint)
}
if err != nil {
errs[v.DeploymentID] = errSRPeerResp(fmt.Errorf("unable to create admin client for %s: %w", v.Name, err))
return
}
if err = admClient.SRPeerEdit(ctx, pi); err != nil {
errs[v.DeploymentID] = errSRPeerResp(fmt.Errorf("unable to update peer %s: %w", v.Name, err))
return
}
}(pi, i)
}
wg.Wait()
for dID, err := range errs {
if err != nil {
return madmin.ReplicateEditStatus{}, errSRPeerResp(fmt.Errorf("unable to update peer %s: %w", c.state.Peers[dID].Name, err))
}
}
}
// we can now save the cluster replication configuration state.
if err = c.saveToDisk(ctx, c.state); err != nil {
return madmin.ReplicateEditStatus{
Status: madmin.ReplicateAddStatusPartial,
ErrDetail: fmt.Sprintf("unable to save cluster-replication state on local: %v", err),
}, nil
}
if err = c.updateTargetEndpoints(ctx, prevPeerInfo, pi); err != nil {
return madmin.ReplicateEditStatus{
Status: madmin.ReplicateAddStatusPartial,
ErrDetail: fmt.Sprintf("unable to update peer targets on local: %v", err),
}, nil
}
result := madmin.ReplicateEditStatus{
Success: true,
Status: fmt.Sprintf("Cluster replication configuration updated with endpoint %s for peer %s successfully", peer.Endpoint, peer.Name),
}
return result, nil
}
func (c *SiteReplicationSys) updateTargetEndpoints(ctx context.Context, prevInfo, peer madmin.PeerInfo) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errSRObjectLayerNotReady
}
buckets, err := objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
return err
}
for _, bucketInfo := range buckets {
bucket := bucketInfo.Name
ep, _ := url.Parse(peer.Endpoint)
prevEp, _ := url.Parse(prevInfo.Endpoint)
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
continue // site healing will take care of configuring new targets
}
for _, target := range targets.Targets {
if target.SourceBucket == bucket &&
target.TargetBucket == bucket &&
target.Endpoint == prevEp.Host &&
target.Secure == (prevEp.Scheme == "https") &&
target.Type == madmin.ReplicationService {
bucketTarget := target
bucketTarget.Secure = ep.Scheme == "https"
bucketTarget.Endpoint = ep.Host
if !peer.SyncState.Empty() {
bucketTarget.ReplicationSync = (peer.SyncState == madmin.SyncEnabled)
}
err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, true)
if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err))
continue
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
continue
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
logger.LogIf(ctx, err)
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
logger.LogIf(ctx, err)
continue
}
}
}
}
return nil
}
// PeerEditReq - internal API handler to respond to a peer cluster's request
// to edit endpoint.
func (c *SiteReplicationSys) PeerEditReq(ctx context.Context, arg madmin.PeerInfo) error {
ourName := ""
for i := range c.state.Peers {
p := c.state.Peers[i]
if p.DeploymentID == arg.DeploymentID {
p.Endpoint = arg.Endpoint
c.state.Peers[arg.DeploymentID] = p
}
if p.DeploymentID == globalDeploymentID {
ourName = p.Name
}
}
if err := c.saveToDisk(ctx, c.state); err != nil {
return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
}
return nil
}
const siteHealTimeInterval = 30 * time.Second
func (c *SiteReplicationSys) startHealRoutine(ctx context.Context, objAPI ObjectLayer) {
ctx, cancel := globalLeaderLock.GetLock(ctx)
defer cancel()
healTimer := time.NewTimer(siteHealTimeInterval)
defer healTimer.Stop()
var maxRefreshDurationSecondsForLog float64 = 10 // 10 seconds
for {
select {
case <-healTimer.C:
c.RLock()
enabled := c.enabled
c.RUnlock()
if enabled {
refreshStart := time.Now()
c.healIAMSystem(ctx, objAPI) // heal IAM system first
c.healBuckets(ctx, objAPI) // heal buckets subsequently
took := time.Since(refreshStart).Seconds()
if took > maxRefreshDurationSecondsForLog {
// Log if we took a lot of time.
logger.Info("Site replication healing refresh took %.2fs", took)
}
// wait for 200 milliseconds if we are experiencing a lot of I/O
waitForLowIO(runtime.GOMAXPROCS(0), 200*time.Millisecond, currentHTTPIO)
}
healTimer.Reset(siteHealTimeInterval)
case <-ctx.Done():
return
}
}
}
type srBucketStatsSummary struct {
madmin.SRBucketStatsSummary
meta srBucketMetaInfo
}
type srPolicyStatsSummary struct {
madmin.SRPolicyStatsSummary
policy srPolicy
}
type srUserStatsSummary struct {
madmin.SRUserStatsSummary
userInfo srUserInfo
userPolicy srPolicyMapping
}
type srGroupStatsSummary struct {
madmin.SRGroupStatsSummary
groupDesc srGroupDesc
groupPolicy srPolicyMapping
}
type srStatusInfo struct {
// SRStatusInfo returns detailed status on site replication status
Enabled bool
MaxBuckets int // maximum buckets seen across sites
MaxUsers int // maximum users seen across sites
MaxGroups int // maximum groups seen across sites
MaxPolicies int // maximum policies across sites
Sites map[string]madmin.PeerInfo // deployment->sitename
StatsSummary map[string]madmin.SRSiteSummary // map of deployment id -> site stat
// BucketStats map of bucket to slice of deployment IDs with stats. This is populated only if there are
// mismatches or if a specific bucket's stats are requested
BucketStats map[string]map[string]srBucketStatsSummary
// PolicyStats map of policy to slice of deployment IDs with stats. This is populated only if there are
// mismatches or if a specific bucket's stats are requested
PolicyStats map[string]map[string]srPolicyStatsSummary
// UserStats map of user to slice of deployment IDs with stats. This is populated only if there are
// mismatches or if a specific bucket's stats are requested
UserStats map[string]map[string]srUserStatsSummary
// GroupStats map of group to slice of deployment IDs with stats. This is populated only if there are
// mismatches or if a specific bucket's stats are requested
GroupStats map[string]map[string]srGroupStatsSummary
}
// SRBucketDeleteOp - type of delete op
type SRBucketDeleteOp string
const (
// MarkDelete creates .minio.sys/buckets/.deleted/<bucket> vol entry to hold onto deleted bucket's state
// until peers are synced in site replication setup.
MarkDelete SRBucketDeleteOp = "MarkDelete"
// Purge deletes the .minio.sys/buckets/.deleted/<bucket> vol entry
Purge SRBucketDeleteOp = "Purge"
// NoOp no action needed
NoOp SRBucketDeleteOp = "NoOp"
)
// Empty returns true if this Op is not set
func (s SRBucketDeleteOp) Empty() bool {
return string(s) == "" || string(s) == string(NoOp)
}
func getSRBucketDeleteOp(isSiteReplicated bool) SRBucketDeleteOp {
if !isSiteReplicated {
return NoOp
}
return MarkDelete
}
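// Illustrative usage sketch (not part of the original flow) for the delete-op
// selection above: site-replicated deployments first mark a bucket deleted so
// peers can converge, while non-replicated deployments need no extra bookkeeping.
func exampleSRBucketDeleteOp() {
	fmt.Println(getSRBucketDeleteOp(true))  // MarkDelete
	fmt.Println(getSRBucketDeleteOp(false)) // NoOp
	fmt.Println(NoOp.Empty())               // true - NoOp counts as "not set"
}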
func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer) error {
buckets, err := c.listBuckets(ctx)
if err != nil {
return err
}
for _, bi := range buckets {
bucket := bi.Name
info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
Entity: madmin.SRBucketEntity,
EntityValue: bucket,
ShowDeleted: true,
})
if err != nil {
logger.LogIf(ctx, err)
continue
}
c.healBucket(ctx, objAPI, bucket, info)
if bi.Deleted.IsZero() || (!bi.Created.IsZero() && bi.Deleted.Before(bi.Created)) {
c.healVersioningMetadata(ctx, objAPI, bucket, info)
c.healOLockConfigMetadata(ctx, objAPI, bucket, info)
c.healSSEMetadata(ctx, objAPI, bucket, info)
c.healBucketReplicationConfig(ctx, objAPI, bucket, info)
c.healBucketPolicies(ctx, objAPI, bucket, info)
c.healTagMetadata(ctx, objAPI, bucket, info)
c.healBucketQuotaConfig(ctx, objAPI, bucket, info)
}
// Notification and ILM are site-specific settings.
}
return nil
}
func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestTaggingConfig *string
)
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.TagConfigUpdatedAt
latestID = dID
latestTaggingConfig = ss.meta.Tags
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.TagConfigUpdatedAt) {
continue
}
if ss.meta.TagConfigUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.TagConfigUpdatedAt
latestID = dID
latestTaggingConfig = ss.meta.Tags
}
}
latestPeerName = info.Sites[latestID].Name
var latestTaggingConfigBytes []byte
var err error
if latestTaggingConfig != nil {
latestTaggingConfigBytes, err = base64.StdEncoding.DecodeString(*latestTaggingConfig)
if err != nil {
return err
}
}
for dID, bStatus := range bs {
if !bStatus.TagMismatch {
continue
}
if isBucketMetadataEqual(latestTaggingConfig, bStatus.meta.Tags) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: latestTaggingConfig,
})
if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestIAMPolicy json.RawMessage
)
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.PolicyUpdatedAt
latestID = dID
latestIAMPolicy = ss.meta.Policy
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.PolicyUpdatedAt) {
continue
}
if ss.meta.PolicyUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.PolicyUpdatedAt
latestID = dID
latestIAMPolicy = ss.meta.Policy
}
}
latestPeerName = info.Sites[latestID].Name
for dID, bStatus := range bs {
if !bStatus.PolicyMismatch {
continue
}
if strings.EqualFold(string(latestIAMPolicy), string(bStatus.meta.Policy)) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
if err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: latestIAMPolicy,
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal bucket policy metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestQuotaConfig *string
latestQuotaConfigBytes []byte
)
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.QuotaConfigUpdatedAt
latestID = dID
latestQuotaConfig = ss.meta.QuotaConfig
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.QuotaConfigUpdatedAt) {
continue
}
if ss.meta.QuotaConfigUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.QuotaConfigUpdatedAt
latestID = dID
latestQuotaConfig = ss.meta.QuotaConfig
}
}
var err error
if latestQuotaConfig != nil {
latestQuotaConfigBytes, err = base64.StdEncoding.DecodeString(*latestQuotaConfig)
if err != nil {
return err
}
}
latestPeerName = info.Sites[latestID].Name
for dID, bStatus := range bs {
if !bStatus.QuotaCfgMismatch {
continue
}
if isBucketMetadataEqual(latestQuotaConfig, bStatus.meta.QuotaConfig) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
if err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig,
Bucket: bucket,
Quota: latestQuotaConfigBytes,
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal quota config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestVersioningConfig *string
)
bs := info.BucketStats[bucket]
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.VersioningConfigUpdatedAt
latestID = dID
latestVersioningConfig = ss.meta.Versioning
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.VersioningConfigUpdatedAt) {
continue
}
if ss.meta.VersioningConfigUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.VersioningConfigUpdatedAt
latestID = dID
latestVersioningConfig = ss.meta.Versioning
}
}
latestPeerName = info.Sites[latestID].Name
var latestVersioningConfigBytes []byte
var err error
if latestVersioningConfig != nil {
latestVersioningConfigBytes, err = base64.StdEncoding.DecodeString(*latestVersioningConfig)
if err != nil {
return err
}
}
for dID, bStatus := range bs {
if !bStatus.VersioningConfigMismatch {
continue
}
if isBucketMetadataEqual(latestVersioningConfig, bStatus.meta.Versioning) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, latestVersioningConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeVersionConfig,
Bucket: bucket,
Versioning: latestVersioningConfig,
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal versioning config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestSSEConfig *string
)
bs := info.BucketStats[bucket]
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.SSEConfigUpdatedAt
latestID = dID
latestSSEConfig = ss.meta.SSEConfig
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.SSEConfigUpdatedAt) {
continue
}
if ss.meta.SSEConfigUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.SSEConfigUpdatedAt
latestID = dID
latestSSEConfig = ss.meta.SSEConfig
}
}
latestPeerName = info.Sites[latestID].Name
var latestSSEConfigBytes []byte
var err error
if latestSSEConfig != nil {
latestSSEConfigBytes, err = base64.StdEncoding.DecodeString(*latestSSEConfig)
if err != nil {
return err
}
}
for dID, bStatus := range bs {
if !bStatus.SSEConfigMismatch {
continue
}
if isBucketMetadataEqual(latestSSEConfig, bStatus.meta.SSEConfig) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: latestSSEConfig,
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal SSE config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestObjLockConfig *string
)
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = ss.meta.ObjectLockConfigUpdatedAt
latestID = dID
latestObjLockConfig = ss.meta.ObjectLockConfig
}
// avoid considering just-created buckets as latest. Perhaps this site
// just joined cluster replication and is yet to be synced
if ss.meta.CreatedAt.Equal(ss.meta.ObjectLockConfigUpdatedAt) {
continue
}
if ss.meta.ObjectLockConfig != nil && ss.meta.ObjectLockConfigUpdatedAt.After(lastUpdate) {
lastUpdate = ss.meta.ObjectLockConfigUpdatedAt
latestID = dID
latestObjLockConfig = ss.meta.ObjectLockConfig
}
}
latestPeerName = info.Sites[latestID].Name
var latestObjLockConfigBytes []byte
var err error
if latestObjLockConfig != nil {
latestObjLockConfigBytes, err = base64.StdEncoding.DecodeString(*latestObjLockConfig)
if err != nil {
return err
}
}
for dID, bStatus := range bs {
if !bStatus.OLockConfigMismatch {
continue
}
if isBucketMetadataEqual(latestObjLockConfig, bStatus.meta.ObjectLockConfig) {
continue
}
if dID == globalDeploymentID {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err))
}
continue
}
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return wrapSRErr(err)
}
peerName := info.Sites[dID].Name
err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: latestObjLockConfig,
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal object lock config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err)))
}
}
return nil
}
func (c *SiteReplicationSys) purgeDeletedBucket(ctx context.Context, objAPI ObjectLayer, bucket string) {
z, ok := objAPI.(*erasureServerPools)
if !ok {
return
}
z.s3Peer.DeleteBucket(context.Background(), pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket), DeleteBucketOptions{})
}
// healBucket creates/deletes the bucket according to latest state across clusters participating in site replication.
func (c *SiteReplicationSys) healBucket(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
numSites := len(c.state.Peers)
mostRecent := func(d1, d2 time.Time) time.Time {
if d1.IsZero() {
return d2
}
if d2.IsZero() {
return d1
}
if d1.After(d2) {
return d1
}
return d2
}
var (
latestID string
lastUpdate time.Time
withB []string
missingB []string
deletedCnt int
)
for dID, ss := range bs {
if lastUpdate.IsZero() {
lastUpdate = mostRecent(ss.meta.CreatedAt, ss.meta.DeletedAt)
latestID = dID
}
recentUpdt := mostRecent(ss.meta.CreatedAt, ss.meta.DeletedAt)
if recentUpdt.After(lastUpdate) {
lastUpdate = recentUpdt
latestID = dID
}
if ss.BucketMarkedDeleted {
deletedCnt++
}
if ss.HasBucket {
withB = append(withB, dID)
} else {
missingB = append(missingB, dID)
}
}
latestPeerName := info.Sites[latestID].Name
bStatus := info.BucketStats[bucket][latestID].meta
isMakeBucket := len(missingB) > 0
deleteOp := NoOp
if latestID != globalDeploymentID {
return nil
}
if lastUpdate.Equal(bStatus.DeletedAt) {
isMakeBucket = false
switch {
case len(withB) == numSites && deletedCnt == numSites:
deleteOp = NoOp
case len(withB) == 0 && len(missingB) == numSites:
deleteOp = Purge
default:
deleteOp = MarkDelete
}
}
if isMakeBucket {
var opts MakeBucketOptions
optsMap := make(map[string]string)
optsMap["versioningEnabled"] = "true"
opts.VersioningEnabled = true
opts.CreatedAt = bStatus.CreatedAt
optsMap["createdAt"] = bStatus.CreatedAt.UTC().Format(time.RFC3339Nano)
if bStatus.ObjectLockConfig != nil {
config, err := base64.StdEncoding.DecodeString(*bStatus.ObjectLockConfig)
if err != nil {
return err
}
if bytes.Equal(config, enabledBucketObjectLockConfig) {
optsMap["lockEnabled"] = "true"
opts.LockEnabled = true
}
}
for _, dID := range missingB {
peerName := info.Sites[dID].Name
if dID == globalDeploymentID {
err := c.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
if err != nil {
return c.annotateErr(makeBucketWithVersion, fmt.Errorf("error healing bucket for site replication %w from %s -> %s",
err, latestPeerName, peerName))
}
} else {
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
}
if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.MakeWithVersioningBktOp, optsMap); err != nil {
return c.annotatePeerErr(peerName, makeBucketWithVersion, err)
}
if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ConfigureReplBktOp, nil); err != nil {
return c.annotatePeerErr(peerName, configureReplication, err)
}
}
}
if len(missingB) > 0 {
// configure replication from current cluster to other clusters
err := c.PeerBucketConfigureReplHandler(ctx, bucket)
if err != nil {
return c.annotateErr(configureReplication, err)
}
}
return nil
}
// the bucket is marked deleted across all sites at this point. It should be safe to purge the
// .minio.sys/buckets/.deleted/<bucket> entry from disk
if deleteOp == Purge {
for _, dID := range missingB {
peerName := info.Sites[dID].Name
if dID == globalDeploymentID {
c.purgeDeletedBucket(ctx, objAPI, bucket)
} else {
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
}
if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.PurgeDeletedBucketOp, nil); err != nil {
return c.annotatePeerErr(peerName, deleteBucket, err)
}
}
}
}
// Mark buckets deleted on remaining peers
if deleteOp == MarkDelete {
for _, dID := range withB {
peerName := info.Sites[dID].Name
if dID == globalDeploymentID {
err := c.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
Force: true,
})
if err != nil {
return c.annotateErr(deleteBucket, fmt.Errorf("error healing bucket for site replication %w from %s -> %s",
err, latestPeerName, peerName))
}
} else {
admClient, err := c.getAdminClient(ctx, dID)
if err != nil {
return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
}
if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ForceDeleteBucketBktOp, nil); err != nil {
return c.annotatePeerErr(peerName, deleteBucket, err)
}
}
}
}
return nil
}
func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
bs := info.BucketStats[bucket]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var replMismatch bool
for _, ss := range bs {
if ss.ReplicationCfgMismatch {
replMismatch = true
break
}
}
rcfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
_, ok := err.(BucketReplicationConfigNotFound)
if !ok {
return err
}
replMismatch = true
}
var (
epDeplIDMap = make(map[string]string)
arnTgtMap = make(map[string]madmin.BucketTarget)
)
if targetsPtr, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket); targetsPtr != nil {
for _, t := range targetsPtr.Targets {
arnTgtMap[t.Arn] = t
}
}
for _, p := range c.state.Peers {
epDeplIDMap[p.Endpoint] = p.DeploymentID
}
// fix stale ARN's in replication config and endpoint mismatch between site config and
// targets associated to this config.
if rcfg != nil {
for _, rule := range rcfg.Rules {
if rule.Status != sreplication.Status(replication.Disabled) {
tgt, isValidARN := arnTgtMap[rule.Destination.ARN] // detect stale ARN in replication config
_, epFound := epDeplIDMap[tgt.URL().String()] // detect end point change at site level
if !isValidARN || !epFound {
replMismatch = true
break
}
}
}
}
if rcfg != nil && !replMismatch {
// validate remote targets on current cluster for this bucket
_, apiErr := validateReplicationDestination(ctx, bucket, rcfg, false)
if apiErr != noError {
replMismatch = true
}
}
if replMismatch {
logger.LogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket)))
}
return nil
}
func isBucketMetadataEqual(one, two *string) bool {
switch {
case one == nil && two == nil:
return true
case one == nil || two == nil:
return false
default:
return strings.EqualFold(*one, *two)
}
}
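// Illustrative sketch (not part of the original flow) of the nil/value semantics
// used when comparing base64-encoded bucket metadata between sites; the encoded
// string below is an arbitrary example value.
func exampleBucketMetadataEqual() {
	cfg := "PFRhZ2dpbmcvPg==" // hypothetical base64-encoded config
	other := cfg
	fmt.Println(isBucketMetadataEqual(&cfg, &other)) // true - both sides carry the same encoded config
	fmt.Println(isBucketMetadataEqual(nil, &cfg))    // false - only one side has the config set
	fmt.Println(isBucketMetadataEqual(nil, nil))     // true - neither side has the config
}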
func (c *SiteReplicationSys) healIAMSystem(ctx context.Context, objAPI ObjectLayer) error {
info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
Users: true,
Policies: true,
Groups: true,
})
if err != nil {
return err
}
for policy := range info.PolicyStats {
c.healPolicies(ctx, objAPI, policy, info)
}
for user := range info.UserStats {
c.healUsers(ctx, objAPI, user, info)
}
for group := range info.GroupStats {
c.healGroups(ctx, objAPI, group, info)
}
for user := range info.UserStats {
c.healUserPolicies(ctx, objAPI, user, info)
}
for group := range info.GroupStats {
c.healGroupPolicies(ctx, objAPI, group, info)
}
return nil
}
// heal iam policies present on this site to peers, provided current cluster has the most recent update.
func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLayer, policy string, info srStatusInfo) error {
// create IAM policy on peer cluster if missing
ps := info.PolicyStats[policy]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestPolicyStat srPolicyStatsSummary
)
for dID, ss := range ps {
if lastUpdate.IsZero() {
lastUpdate = ss.policy.UpdatedAt
latestID = dID
latestPolicyStat = ss
}
if !ss.policy.UpdatedAt.IsZero() && ss.policy.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.policy.UpdatedAt
latestID = dID
latestPolicyStat = ss
}
}
if latestID != globalDeploymentID {
// heal only from the site with latest info.
return nil
}
latestPeerName = info.Sites[latestID].Name
// heal policy of peers if peer does not have it.
for dID, pStatus := range ps {
if dID == globalDeploymentID {
continue
}
if !pStatus.PolicyMismatch && pStatus.HasPolicy {
continue
}
peerName := info.Sites[dID].Name
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policy,
Policy: latestPolicyStat.policy.Policy,
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err))
}
}
return nil
}
// heal user policy mappings present on this site to peers, provided current cluster has the most recent update.
func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI ObjectLayer, user string, info srStatusInfo) error {
// create user policy mapping on peer cluster if missing
us := info.UserStats[user]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestUserStat srUserStatsSummary
)
for dID, ss := range us {
if lastUpdate.IsZero() {
lastUpdate = ss.userPolicy.UpdatedAt
latestID = dID
latestUserStat = ss
}
if !ss.userPolicy.UpdatedAt.IsZero() && ss.userPolicy.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.userPolicy.UpdatedAt
latestID = dID
latestUserStat = ss
}
}
if latestID != globalDeploymentID {
// heal only from the site with latest info.
return nil
}
latestPeerName = info.Sites[latestID].Name
// heal policy mapping of peers if a peer does not have it.
for dID, pStatus := range us {
if dID == globalDeploymentID {
continue
}
if !pStatus.PolicyMismatch && pStatus.HasPolicyMapping {
continue
}
if isPolicyMappingEqual(pStatus.userPolicy, latestUserStat.userPolicy) {
continue
}
peerName := info.Sites[dID].Name
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: user,
IsGroup: false,
Policy: latestUserStat.userPolicy.Policy,
},
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err))
}
}
return nil
}
// heal group policy mappings present on this site to peers, provided current cluster has the most recent update.
func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI ObjectLayer, group string, info srStatusInfo) error {
// create group policy mapping on peer cluster if missing
gs := info.GroupStats[group]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestGroupStat srGroupStatsSummary
)
for dID, ss := range gs {
if lastUpdate.IsZero() {
lastUpdate = ss.groupPolicy.UpdatedAt
latestID = dID
latestGroupStat = ss
}
if !ss.groupPolicy.UpdatedAt.IsZero() && ss.groupPolicy.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.groupPolicy.UpdatedAt
latestID = dID
latestGroupStat = ss
}
}
if latestID != globalDeploymentID {
// heal only from the site with latest info.
return nil
}
latestPeerName = info.Sites[latestID].Name
// heal policy mapping of peers if a peer does not have it.
for dID, pStatus := range gs {
if dID == globalDeploymentID {
continue
}
if !pStatus.PolicyMismatch && pStatus.HasPolicyMapping {
continue
}
if isPolicyMappingEqual(pStatus.groupPolicy, latestGroupStat.groupPolicy) {
continue
}
peerName := info.Sites[dID].Name
err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: group,
IsGroup: true,
Policy: latestGroupStat.groupPolicy.Policy,
},
UpdatedAt: lastUpdate,
})
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
}
}
return nil
}
// heal all users and their service accounts that are present on this site,
// provided current cluster has the most recent update.
func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, user string, info srStatusInfo) error {
// create user if missing; fix user policy mapping if missing
us := info.UserStats[user]
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestUserStat srUserStatsSummary
)
for dID, ss := range us {
if lastUpdate.IsZero() {
lastUpdate = ss.userInfo.UserInfo.UpdatedAt
latestID = dID
latestUserStat = ss
}
if !ss.userInfo.UserInfo.UpdatedAt.IsZero() && ss.userInfo.UserInfo.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.userInfo.UserInfo.UpdatedAt
latestID = dID
latestUserStat = ss
}
}
if latestID != globalDeploymentID {
// heal only from the site with latest info.
return nil
}
latestPeerName = info.Sites[latestID].Name
for dID, uStatus := range us {
if dID == globalDeploymentID {
continue
}
if !uStatus.UserInfoMismatch {
continue
}
if isUserInfoEqual(latestUserStat.userInfo.UserInfo, uStatus.userInfo.UserInfo) {
continue
}
peerName := info.Sites[dID].Name
u, ok := globalIAMSys.GetUser(ctx, user)
if !ok {
continue
}
creds := u.Credentials
if creds.IsServiceAccount() {
claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue
}
var policyJSON []byte
if policy != nil {
policyJSON, err = json.Marshal(policy)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue
}
}
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
Parent: creds.ParentUser,
AccessKey: creds.AccessKey,
SecretKey: creds.SecretKey,
Groups: creds.Groups,
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: creds.Status,
Name: creds.Name,
Description: creds.Description,
Expiration: &creds.Expiration,
},
},
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
}
continue
}
if creds.IsTemp() && !creds.IsExpired() {
var parentPolicy string
u, err := globalIAMSys.GetUserInfo(ctx, creds.ParentUser)
if err != nil {
// Parent may be "virtual" (for ldap, oidc, client tls auth,
// custom auth plugin), so in such cases we apply no parent
// policy. The session token will contain info about policy to
// be applied.
if !errors.Is(err, errNoSuchUser) {
logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue
}
} else {
parentPolicy = u.PolicyName
}
// Call hook for site replication.
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{
AccessKey: creds.AccessKey,
SecretKey: creds.SecretKey,
SessionToken: creds.SessionToken,
ParentUser: creds.ParentUser,
ParentPolicyMapping: parentPolicy,
},
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
}
continue
}
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: user,
IsDeleteReq: false,
UserReq: &madmin.AddOrUpdateUserReq{
SecretKey: creds.SecretKey,
Status: latestUserStat.userInfo.Status,
},
},
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
}
}
return nil
}
func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer, group string, info srStatusInfo) error {
c.RLock()
defer c.RUnlock()
if !c.enabled {
return nil
}
var (
latestID, latestPeerName string
lastUpdate time.Time
latestGroupStat srGroupStatsSummary
)
// create group if missing; fix group policy mapping if missing
gs, ok := info.GroupStats[group]
if !ok {
return nil
}
for dID, ss := range gs {
if lastUpdate.IsZero() {
lastUpdate = ss.groupDesc.UpdatedAt
latestID = dID
latestGroupStat = ss
}
if !ss.groupDesc.UpdatedAt.IsZero() && ss.groupDesc.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.groupDesc.UpdatedAt
latestID = dID
latestGroupStat = ss
}
}
if latestID != globalDeploymentID {
// heal only from the site with latest info.
return nil
}
latestPeerName = info.Sites[latestID].Name
for dID, gStatus := range gs {
if dID == globalDeploymentID {
continue
}
if !gStatus.GroupDescMismatch {
continue
}
if isGroupDescEqual(latestGroupStat.groupDesc.GroupDesc, gStatus.groupDesc.GroupDesc) {
continue
}
peerName := info.Sites[dID].Name
if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: madmin.GroupAddRemove{
Group: group,
Status: madmin.GroupStatus(latestGroupStat.groupDesc.Status),
Members: latestGroupStat.groupDesc.Members,
IsRemove: false,
},
},
UpdatedAt: lastUpdate,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
}
}
return nil
}
func isGroupDescEqual(g1, g2 madmin.GroupDesc) bool {
if g1.Name != g2.Name ||
g1.Status != g2.Status ||
g1.Policy != g2.Policy {
return false
}
if len(g1.Members) != len(g2.Members) {
return false
}
for _, v1 := range g1.Members {
var found bool
for _, v2 := range g2.Members {
if v1 == v2 {
found = true
break
}
}
if !found {
return false
}
}
return true
}
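// Illustrative sketch (not part of the original flow): group equality ignores
// member ordering but requires identical name, status, policy and membership.
// The group values below are hypothetical.
func exampleGroupDescEqual() {
	g1 := madmin.GroupDesc{Name: "devs", Status: "enabled", Policy: "readwrite", Members: []string{"alice", "bob"}}
	g2 := madmin.GroupDesc{Name: "devs", Status: "enabled", Policy: "readwrite", Members: []string{"bob", "alice"}}
	fmt.Println(isGroupDescEqual(g1, g2)) // true - member order does not matter
}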
func isUserInfoEqual(u1, u2 madmin.UserInfo) bool {
if u1.PolicyName != u2.PolicyName ||
u1.Status != u2.Status ||
u1.SecretKey != u2.SecretKey {
return false
}
if len(u1.MemberOf) != len(u2.MemberOf) {
return false
}
for _, v1 := range u1.MemberOf {
var found bool
for _, v2 := range u2.MemberOf {
if v1 == v2 {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func isPolicyMappingEqual(p1, p2 srPolicyMapping) bool {
return p1.Policy == p2.Policy && p1.IsGroup == p2.IsGroup && p1.UserOrGroup == p2.UserOrGroup
}
type srPeerInfo struct {
madmin.PeerInfo
EndpointURL *url.URL
}
// getPeerForUpload returns the site replication peer handling this upload. Defaults to local cluster otherwise
func (c *SiteReplicationSys) getPeerForUpload(deplID string) (pi srPeerInfo, local bool) {
ci, _ := c.GetClusterInfo(GlobalContext)
if !ci.Enabled {
return pi, true
}
for _, site := range ci.Sites {
if deplID == site.DeploymentID {
ep, _ := url.Parse(site.Endpoint)
pi = srPeerInfo{
PeerInfo: site,
EndpointURL: ep,
}
return pi, site.DeploymentID == globalDeploymentID
}
}
return pi, true
}
// startResync initiates a resync of data to the specified peer site. The overall site resync status
// is maintained in .minio.sys/buckets/site-replication/resync/<deployment-id>.meta, while individual
// bucket resync status is collected in .minio.sys/buckets/<bucket-name>/replication/resync.bin
func (c *SiteReplicationSys) startResync(ctx context.Context, objAPI ObjectLayer, peer madmin.PeerInfo) (res madmin.SRResyncOpStatus, err error) {
if !c.isEnabled() {
return res, errSRNotEnabled
}
if objAPI == nil {
return res, errSRObjectLayerNotReady
}
if peer.DeploymentID == globalDeploymentID {
return res, errSRResyncToSelf
}
if _, ok := c.state.Peers[peer.DeploymentID]; !ok {
return res, errSRPeerNotFound
}
rs, err := globalSiteResyncMetrics.siteStatus(ctx, objAPI, peer.DeploymentID)
if err != nil {
return res, err
}
if rs.Status == ResyncStarted {
return res, errSRResyncStarted
}
var buckets []BucketInfo
buckets, err = objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
return res, err
}
rs = newSiteResyncStatus(peer.DeploymentID, buckets)
defer func() {
if err != nil {
rs.Status = ResyncFailed
saveSiteResyncMetadata(ctx, rs, objAPI)
globalSiteResyncMetrics.updateState(rs)
}
}()
globalSiteResyncMetrics.updateState(rs)
if err := saveSiteResyncMetadata(ctx, rs, objAPI); err != nil {
return res, err
}
for _, bi := range buckets {
bucket := bi.Name
if _, err := getReplicationConfig(ctx, bucket); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
Status: ResyncFailed.String(),
})
continue
}
// mark remote target for this deployment with the new reset id
tgtArn := globalBucketTargetSys.getRemoteARNForPeer(bucket, peer)
if tgtArn == "" {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: fmt.Sprintf("no valid remote target found for this peer %s (%s)", peer.Name, peer.DeploymentID),
Bucket: bucket,
})
continue
}
target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArn)
target.ResetBeforeDate = UTCNow()
target.ResetID = rs.ResyncID
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
if err := globalReplicationPool.resyncer.start(ctx, objAPI, resyncOpts{
bucket: bucket,
arn: tgtArn,
resyncID: rs.ResyncID,
}); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
}
res = madmin.SRResyncOpStatus{
Status: ResyncStarted.String(),
OpType: "start",
ResyncID: rs.ResyncID,
}
if len(res.Buckets) > 0 {
res.ErrDetail = "partial failure in starting site resync"
}
return res, nil
}
// cancelResync stops an ongoing site-level resync for the specified peer.
func (c *SiteReplicationSys) cancelResync(ctx context.Context, objAPI ObjectLayer, peer madmin.PeerInfo) (res madmin.SRResyncOpStatus, err error) {
if !c.isEnabled() {
return res, errSRNotEnabled
}
if objAPI == nil {
return res, errSRObjectLayerNotReady
}
if peer.DeploymentID == globalDeploymentID {
return res, errSRResyncToSelf
}
if _, ok := c.state.Peers[peer.DeploymentID]; !ok {
return res, errSRPeerNotFound
}
rs, err := globalSiteResyncMetrics.siteStatus(ctx, objAPI, peer.DeploymentID)
if err != nil {
return res, err
}
switch rs.Status {
case ResyncCanceled:
return res, errSRResyncCanceled
case ResyncCompleted, NoResync:
return res, errSRNoResync
}
res = madmin.SRResyncOpStatus{
Status: rs.Status.String(),
OpType: "cancel",
ResyncID: rs.ResyncID,
}
switch rs.Status {
case ResyncCanceled:
return res, errSRResyncCanceled
case ResyncCompleted, NoResync:
return res, errSRNoResync
}
targets := globalBucketTargetSys.ListTargets(ctx, "", string(madmin.ReplicationService))
// clear the remote target resetID set while initiating resync to stop replication
for _, t := range targets {
if t.ResetID == rs.ResyncID {
// get tgt with credentials
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, t.SourceBucket, t.Arn)
tgt.ResetID = ""
bucket := t.SourceBucket
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &tgt, true); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{
ErrDetail: err.Error(),
Bucket: bucket,
})
continue
}
// update resync state for the bucket
globalReplicationPool.resyncer.Lock()
m, ok := globalReplicationPool.resyncer.statusMap[bucket]
if !ok {
m = newBucketResyncStatus(bucket)
}
if st, ok := m.TargetsMap[t.Arn]; ok {
st.LastUpdate = UTCNow()
st.ResyncStatus = ResyncCanceled
m.TargetsMap[t.Arn] = st
m.LastUpdate = UTCNow()
}
globalReplicationPool.resyncer.statusMap[bucket] = m
globalReplicationPool.resyncer.Unlock()
}
}
rs.Status = ResyncCanceled
rs.LastUpdate = UTCNow()
if err := saveSiteResyncMetadata(ctx, rs, objAPI); err != nil {
return res, err
}
select {
case globalReplicationPool.resyncer.resyncCancelCh <- struct{}{}:
case <-ctx.Done():
}
globalSiteResyncMetrics.updateState(rs)
res.Status = rs.Status.String()
return res, nil
}
const (
siteResyncMetaFormat = 1
siteResyncMetaVersionV1 = 1
siteResyncMetaVersion = siteResyncMetaVersionV1
siteResyncSaveInterval = 10 * time.Second
)
func newSiteResyncStatus(dID string, buckets []BucketInfo) SiteResyncStatus {
now := UTCNow()
s := SiteResyncStatus{
Version: siteResyncMetaVersion,
Status: ResyncStarted,
DeplID: dID,
TotBuckets: len(buckets),
BucketStatuses: make(map[string]ResyncStatusType),
}
for _, bi := range buckets {
s.BucketStatuses[bi.Name] = ResyncPending
}
s.ResyncID = mustGetUUID()
s.StartTime = now
s.LastUpdate = now
return s
}
// load site resync metadata from disk
func loadSiteResyncMetadata(ctx context.Context, objAPI ObjectLayer, dID string) (rs SiteResyncStatus, e error) {
data, err := readConfig(GlobalContext, objAPI, getSRResyncFilePath(dID))
if err != nil {
return rs, err
}
if len(data) == 0 {
// Seems to be empty.
return rs, nil
}
if len(data) <= 4 {
return rs, fmt.Errorf("site resync: no data")
}
// Read resync meta header
switch binary.LittleEndian.Uint16(data[0:2]) {
case siteResyncMetaFormat:
default:
return rs, fmt.Errorf("resyncMeta: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case siteResyncMetaVersion:
default:
return rs, fmt.Errorf("resyncMeta: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
if _, err = rs.UnmarshalMsg(data[4:]); err != nil {
return rs, err
}
switch rs.Version {
case siteResyncMetaVersionV1:
default:
return rs, fmt.Errorf("unexpected resync meta version: %d", rs.Version)
}
return rs, nil
}
// save resync status of peer to resync/depl-id.meta
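// The on-disk layout (a sketch, matching this writer and loadSiteResyncMetadata above):
//
//	bytes 0..1  uint16 little-endian  siteResyncMetaFormat  (currently 1)
//	bytes 2..3  uint16 little-endian  siteResyncMetaVersion (currently 1)
//	bytes 4..   msgp-encoded SiteResyncStatus payload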
func saveSiteResyncMetadata(ctx context.Context, ss SiteResyncStatus, objectAPI ObjectLayer) error {
data := make([]byte, 4, ss.Msgsize()+4)
// Initialize the resync meta header.
binary.LittleEndian.PutUint16(data[0:2], siteResyncMetaFormat)
binary.LittleEndian.PutUint16(data[2:4], siteResyncMetaVersion)
buf, err := ss.MarshalMsg(data)
if err != nil {
return err
}
return saveConfig(ctx, objectAPI, getSRResyncFilePath(ss.DeplID), buf)
}
func getSRResyncFilePath(dID string) string {
return pathJoin(siteResyncPrefix, dID+".meta")
}
<file_sep>// Copyright (c) 2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import "github.com/minio/minio/internal/bucket/lifecycle"
//go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
type lcEventSrc uint8
//revive:disable:var-naming Underscores are used here to indicate where the common prefix ends and the enumeration name begins
const (
lcEventSrc_None lcEventSrc = iota
lcEventSrc_Scanner
lcEventSrc_Decom
lcEventSrc_Rebal
lcEventSrc_s3HeadObject
lcEventSrc_s3GetObject
lcEventSrc_s3ListObjects
lcEventSrc_s3PutObject
lcEventSrc_s3CopyObject
lcEventSrc_s3CompleteMultipartUpload
)
//revive:enable:var-naming
type lcAuditEvent struct {
lifecycle.Event
source lcEventSrc
}
func (lae lcAuditEvent) Tags() map[string]interface{} {
event := lae.Event
src := lae.source
const (
ilmSrc = "ilm-src"
ilmAction = "ilm-action"
ilmDue = "ilm-due"
ilmRuleID = "ilm-rule-id"
ilmTier = "ilm-tier"
ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions"
ilmNoncurrentDays = "ilm-noncurrent-days"
)
tags := make(map[string]interface{}, 5)
if src > lcEventSrc_None {
tags[ilmSrc] = src.String()
}
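	// e.g. an event recorded by the scanner is tagged "ilm-src": "Scanner",
	// per the stringer directive (trimprefix lcEventSrc_) above.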
tags[ilmAction] = event.Action.String()
tags[ilmRuleID] = event.RuleID
if !event.Due.IsZero() {
tags[ilmDue] = event.Due
}
// rule with Transition/NoncurrentVersionTransition in effect
if event.StorageClass != "" {
tags[ilmTier] = event.StorageClass
}
	// rule with NewerNoncurrentVersions in effect
if event.NewerNoncurrentVersions > 0 {
tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions
}
if event.NoncurrentDays > 0 {
tags[ilmNoncurrentDays] = event.NoncurrentDays
}
return tags
}
func newLifecycleAuditEvent(src lcEventSrc, event lifecycle.Event) lcAuditEvent {
return lcAuditEvent{
Event: event,
source: src,
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:generate msgp -file=$GOFILE -unexported
package cmd
import (
"time"
"github.com/minio/madmin-go/v3"
)
const (
sizeLessThan1KiB = iota
sizeLessThan1MiB
sizeLessThan10MiB
sizeLessThan100MiB
sizeLessThan1GiB
sizeGreaterThan1GiB
// Add new entries here
sizeLastElemMarker
)
// sizeToTag converts a size to a tag.
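// For example, sizeToTag(512) returns sizeLessThan1KiB and sizeToTag(5<<20)
// (5 MiB) returns sizeLessThan10MiB.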
func sizeToTag(size int64) int {
switch {
case size < 1024:
return sizeLessThan1KiB
case size < 1024*1024:
return sizeLessThan1MiB
case size < 10*1024*1024:
return sizeLessThan10MiB
case size < 100*1024*1024:
return sizeLessThan100MiB
case size < 1024*1024*1024:
return sizeLessThan1GiB
default:
return sizeGreaterThan1GiB
}
}
func sizeTagToString(tag int) string {
switch tag {
case sizeLessThan1KiB:
return "LESS_THAN_1_KiB"
case sizeLessThan1MiB:
return "LESS_THAN_1_MiB"
case sizeLessThan10MiB:
return "LESS_THAN_10_MiB"
case sizeLessThan100MiB:
return "LESS_THAN_100_MiB"
case sizeLessThan1GiB:
return "LESS_THAN_1_GiB"
case sizeGreaterThan1GiB:
return "GREATER_THAN_1_GiB"
default:
return "unknown"
}
}
// AccElem holds information for calculating an average value
type AccElem struct {
Total int64
Size int64
N int64
}
// Add a duration to a single element.
func (a *AccElem) add(dur time.Duration) {
if dur < 0 {
dur = 0
}
a.Total += int64(dur)
a.N++
}
// Add a duration to a single element.
func (a *AccElem) addSize(dur time.Duration, sz int64) {
if dur < 0 {
dur = 0
}
a.Total += int64(dur)
a.Size += sz
a.N++
}
// Merge b into a.
func (a *AccElem) merge(b AccElem) {
a.N += b.N
a.Total += b.Total
a.Size += b.Size
}
// Avg returns average time spent.
func (a AccElem) avg() time.Duration {
if a.N >= 1 && a.Total > 0 {
return time.Duration(a.Total / a.N)
}
return 0
}
// asTimedAction returns the element as a madmin.TimedAction.
func (a AccElem) asTimedAction() madmin.TimedAction {
return madmin.TimedAction{AccTime: uint64(a.Total), Count: uint64(a.N), Bytes: uint64(a.Size)}
}
// lastMinuteLatency keeps track of last minute latency.
type lastMinuteLatency struct {
Totals [60]AccElem
LastSec int64
}
// Merge data of two lastMinuteLatency structure
func (l lastMinuteLatency) merge(o lastMinuteLatency) (merged lastMinuteLatency) {
if l.LastSec > o.LastSec {
o.forwardTo(l.LastSec)
merged.LastSec = l.LastSec
} else {
l.forwardTo(o.LastSec)
merged.LastSec = o.LastSec
}
for i := range merged.Totals {
merged.Totals[i] = AccElem{
Total: l.Totals[i].Total + o.Totals[i].Total,
N: l.Totals[i].N + o.Totals[i].N,
Size: l.Totals[i].Size + o.Totals[i].Size,
}
}
return merged
}
// Add a new duration data
func (l *lastMinuteLatency) add(t time.Duration) {
sec := time.Now().Unix()
l.forwardTo(sec)
winIdx := sec % 60
l.Totals[winIdx].add(t)
l.LastSec = sec
}
// Add a new duration data
func (l *lastMinuteLatency) addSize(t time.Duration, sz int64) {
sec := time.Now().Unix()
l.forwardTo(sec)
winIdx := sec % 60
l.Totals[winIdx].addSize(t, sz)
l.LastSec = sec
}
// Merge all recorded latencies of last minute into one
func (l *lastMinuteLatency) getTotal() AccElem {
var res AccElem
sec := time.Now().Unix()
l.forwardTo(sec)
for _, elem := range l.Totals[:] {
res.merge(elem)
}
return res
}
// forwardTo time t, clearing any entries in between.
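// For example, if LastSec is 100 and forwardTo(103) is called, the slots for
// seconds 101, 102 and 103 (indices 41, 42, 43) are zeroed before new data is
// recorded, so samples older than a minute never leak into the current window.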
func (l *lastMinuteLatency) forwardTo(t int64) {
if l.LastSec >= t {
return
}
if t-l.LastSec >= 60 {
l.Totals = [60]AccElem{}
return
}
for l.LastSec != t {
// Clear next element.
idx := (l.LastSec + 1) % 60
l.Totals[idx] = AccElem{}
l.LastSec++
}
}
// LastMinuteHistogram keeps track of last minute sizes added.
type LastMinuteHistogram [sizeLastElemMarker]lastMinuteLatency
// Merge safely merges two LastMinuteHistogram structures into one
func (l LastMinuteHistogram) Merge(o LastMinuteHistogram) (merged LastMinuteHistogram) {
for i := range l {
merged[i] = l[i].merge(o[i])
}
return merged
}
// Add latency t from object with the specified size.
func (l *LastMinuteHistogram) Add(size int64, t time.Duration) {
l[sizeToTag(size)].add(t)
}
// GetAvgData will return the average for each bucket from the last time minute.
// The number of objects is also included.
func (l *LastMinuteHistogram) GetAvgData() [sizeLastElemMarker]AccElem {
var res [sizeLastElemMarker]AccElem
for i, elem := range l[:] {
res[i] = elem.getTotal()
}
return res
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/xml"
"io"
)
// From Veeam-SOSAPI_1.0_Document_v1.02d.pdf
// - SOSAPI Protocol Version
// - Model Name of the vendor plus version for statistical analysis.
// - List of Smart Object Storage protocol capabilities supported by the server.
// Currently, there are three capabilities supported:
// - Capacity Reporting
// - Backup data locality for upload sessions (Veeam Smart Entity)
// - Handover of IAM & STS Endpoints instead of manual definition in Veeam Backup & Replication. This allows Veeam
// Agents to directly backup to object storage.
//
// An object storage system can implement one, multiple, or all functions.
//
// - Optional (mandatory if <IAMSTS> is true): Set Endpoints for IAM and STS processing.
//
// - Optional: Set server preferences for Backup & Replication parallel sessions, batch size of deletes, and block sizes (before
// compression). This is an optional area; by default, there should be no <SystemRecommendations> section in the
// system.xml. Vendors can work with Veeam Product Management and the Alliances team on getting approval to integrate
// specific system recommendations based on current support case statistics and storage performance possibilities.
// Vendors might change the settings based on the configuration and scale out of the solution (more storage nodes =>
// higher task limit).
//
// <S3ConcurrentTaskLimit>
//
// - Defines how many S3 operations are executed parallel within one Repository Task Slot (and within one backup object
// that gets offloaded). The same registry key setting overwrites the storage-defined setting.
// Optional value, default 64, range: 1-unlimited
//
// - <S3MultiObjectDeleteLimit>
// Some of the Veeam products use Multi Delete operations. This setting can reduce how many objects are included in one
// multi-delete operation. The same registry key setting overwrites the storage-defined setting.
// Optional value, default 1000, range: 1-unlimited (S3 standard maximum is 1000 and should not be set higher)
//
// - <StorageConcurrentTasksLimit>
// Setting reduces the parallel Repository Task slots that offload or write data to object storage. The same user interface
// setting overwrites the storage-defined setting.
// Optional value, default 0, range: 0-unlimited (0 equals unlimited, which means the maximum configured repository task
// slots are used for object offloading or writing)
//
// - <KbBlockSize>
// Veeam Block Size for backup and restore processing before compression is applied. The higher the block size, the more
// backup space is needed for incremental backups. Larger block sizes also mean less performance for random read restore
// methods like Instant Restore, File Level Recovery, and Database/Application restores. Veeam recommends that vendors
// optimize the storage system for the default value of 1MB minus compression object sizes. The setting simultaneously
// affects read from source, block, file, dedup, and object storage backup targets for a specific Veeam Job. When customers
// create a new backup job and select the object storage or a SOBR as a backup target with this setting, the job default
// setting will be set to this value. This setting will be only applied to newly created jobs (manual changes with Active Full
// processing possible from the customer side).
// Optional value, default 1024, allowed values 256,512,1024,4096,8192, value defined in KB size.
//
// - The object should be present in all buckets accessed by Veeam products that want to leverage the SOSAPI functionality.
//
// - The current protocol version is 1.0.
type apiEndpoints struct {
IAMEndpoint string `xml:"IAMEndpoint"`
STSEndpoint string `xml:"STSEndpoint"`
}
type systemInfo struct {
XMLName xml.Name `xml:"SystemInfo" json:"-"`
ProtocolVersion string `xml:"ProtocolVersion"`
ModelName string `xml:"ModelName"`
ProtocolCapabilities struct {
CapacityInfo bool `xml:"CapacityInfo"`
UploadSessions bool `xml:"UploadSessions"`
IAMSTS bool `xml:"IAMSTS"`
	} `xml:"ProtocolCapabilities"`
APIEndpoints *apiEndpoints `xml:"APIEndpoints,omitempty"`
SystemRecommendations struct {
S3ConcurrentTaskLimit int `xml:"S3ConcurrentTaskLimit,omitempty"`
S3MultiObjectDeleteLimit int `xml:"S3MultiObjectDeleteLimit,omitempty"`
StorageCurrentTaskLimit int `xml:"StorageCurrentTaskLimit,omitempty"`
KBBlockSize int `xml:"KbBlockSize"`
} `xml:"SystemRecommendations"`
}
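// An illustrative system.xml payload as produced from systemInfo via
// encoding/xml (a sketch for readability; the release value is a placeholder
// and empty omitempty fields are left out):
//
//	<SystemInfo>
//	   <ProtocolVersion>"1.0"</ProtocolVersion>
//	   <ModelName>"MinIO RELEASE.2023-xx-xxTxx-xx-xxZ"</ModelName>
//	   <ProtocolCapabilities>
//	      <CapacityInfo>true</CapacityInfo>
//	      <UploadSessions>false</UploadSessions>
//	      <IAMSTS>false</IAMSTS>
//	   </ProtocolCapabilities>
//	   <SystemRecommendations>
//	      <KbBlockSize>4096</KbBlockSize>
//	   </SystemRecommendations>
//	</SystemInfo>
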
// This optional functionality allows vendors to report space information to Veeam products, and Veeam will make placement
// decisions based on this information. For example, Veeam Backup & Replication has a Scale-out-Backup-Repository feature where
// multiple buckets can be used together. The placement logic for additional backup files is based on available space. Other values
// will augment the Veeam user interface and statistics, including free space warnings.
type capacityInfo struct {
XMLName xml.Name `xml:"CapacityInfo" json:"-"`
Capacity int64 `xml:"Capacity"`
Available int64 `xml:"Available"`
Used int64 `xml:"Used"`
}
const (
systemXMLObject = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml"
capacityXMLObject = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml"
)
func veeamSOSAPIHeadObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
gr, err := veeamSOSAPIGetObject(ctx, bucket, object, nil, opts)
if gr != nil {
gr.Close()
return gr.ObjInfo, nil
}
return ObjectInfo{}, err
}
func veeamSOSAPIGetObject(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, opts ObjectOptions) (gr *GetObjectReader, err error) {
var buf []byte
switch object {
case systemXMLObject:
si := systemInfo{
ProtocolVersion: `"1.0"`,
ModelName: "\"MinIO " + ReleaseTag + "\"",
}
si.ProtocolCapabilities.CapacityInfo = true
// Default recommended block size with MinIO
si.SystemRecommendations.KBBlockSize = 4096
buf = encodeResponse(&si)
case capacityXMLObject:
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
info := objAPI.StorageInfo(ctx)
info.Backend = objAPI.BackendInfo()
usableTotal := int64(GetTotalUsableCapacity(info.Disks, info))
usableFree := int64(GetTotalUsableCapacityFree(info.Disks, info))
ci := capacityInfo{
Capacity: usableTotal,
Available: usableFree,
Used: usableTotal - usableFree,
}
buf = encodeResponse(&ci)
default:
return nil, errFileNotFound
}
etag := getMD5Hash(buf)
r := bytes.NewReader(buf)
off, length := int64(0), r.Size()
if rs != nil {
off, length, err = rs.GetOffsetLength(r.Size())
if err != nil {
return nil, err
}
}
r.Seek(off, io.SeekStart)
return NewGetObjectReaderFromReader(io.LimitReader(r, length), ObjectInfo{
Bucket: bucket,
Name: object,
Size: r.Size(),
IsLatest: true,
ContentType: string(mimeXML),
NumVersions: 1,
ETag: etag,
ModTime: UTCNow(),
}, opts)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"fmt"
"net"
"syscall"
)
type acceptResult struct {
conn net.Conn
err error
lidx int
}
// httpListener - HTTP listener capable of handling multiple server addresses.
type httpListener struct {
	tcpListeners []*net.TCPListener // underlying TCP listeners.
acceptCh chan acceptResult // channel where all TCP listeners write accepted connection.
ctx context.Context
ctxCanceler context.CancelFunc
}
// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
// Closure to send acceptResult to acceptCh.
	// It returns true if the result is sent, else false when the listener's context is canceled.
send := func(result acceptResult) bool {
select {
case listener.acceptCh <- result:
// Successfully written to acceptCh
return true
case <-listener.ctx.Done():
return false
}
}
// Closure to handle TCPListener until done channel is closed.
handleListener := func(idx int, tcpListener *net.TCPListener) {
for {
tcpConn, err := tcpListener.AcceptTCP()
if tcpConn != nil {
tcpConn.SetKeepAlive(true)
}
send(acceptResult{tcpConn, err, idx})
}
}
// Start separate goroutine for each TCP listener to handle connection.
for idx, tcpListener := range listener.tcpListeners {
go handleListener(idx, tcpListener)
}
}
// Accept - reads from httpListener.acceptCh for one of the previously accepted TCP connections and returns it.
func (listener *httpListener) Accept() (conn net.Conn, err error) {
select {
case result, ok := <-listener.acceptCh:
if ok {
return result.conn, result.err
}
case <-listener.ctx.Done():
}
return nil, syscall.EINVAL
}
// Close - closes all underlying TCP listeners.
func (listener *httpListener) Close() (err error) {
listener.ctxCanceler()
for i := range listener.tcpListeners {
listener.tcpListeners[i].Close()
}
return nil
}
// Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address.
func (listener *httpListener) Addr() (addr net.Addr) {
addr = listener.tcpListeners[0].Addr()
if len(listener.tcpListeners) == 1 {
return addr
}
tcpAddr := addr.(*net.TCPAddr)
if ip := net.ParseIP("0.0.0.0"); ip != nil {
tcpAddr.IP = ip
}
addr = tcpAddr
return addr
}
// Addrs - returns all address information of TCP listeners.
func (listener *httpListener) Addrs() (addrs []net.Addr) {
for i := range listener.tcpListeners {
addrs = append(addrs, listener.tcpListeners[i].Addr())
}
return addrs
}
// TCPOptions specify customizable TCP optimizations on raw socket
type TCPOptions struct {
UserTimeout int // this value is expected to be in milliseconds
Interface string // this is a VRF device passed via `--interface` flag
Trace func(msg string) // Trace when starting.
}
// newHTTPListener - creates a new httpListener object which is compatible with the net.Listener interface.
// httpListener is capable of:
// * listening on multiple addresses
// * restricting incoming connections to the HTTP protocol only
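// A sketch of typical use (addresses are placeholders); each entry of the
// returned error slice corresponds to one server address:
//
//	listener, errs := newHTTPListener(ctx, []string{":9000", ":9090"}, TCPOptions{})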
func newHTTPListener(ctx context.Context, serverAddrs []string, opts TCPOptions) (listener *httpListener, listenErrs []error) {
tcpListeners := make([]*net.TCPListener, 0, len(serverAddrs))
listenErrs = make([]error, len(serverAddrs))
// Unix listener with special TCP options.
listenCfg := net.ListenConfig{
Control: setTCPParametersFn(opts),
}
for i, serverAddr := range serverAddrs {
var (
l net.Listener
e error
)
if l, e = listenCfg.Listen(ctx, "tcp", serverAddr); e != nil {
if opts.Trace != nil {
opts.Trace(fmt.Sprint("listenCfg.Listen: ", e.Error()))
}
listenErrs[i] = e
continue
}
tcpListener, ok := l.(*net.TCPListener)
if !ok {
listenErrs[i] = fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l)
if opts.Trace != nil {
opts.Trace(fmt.Sprint("net.TCPListener: ", listenErrs[i].Error()))
}
continue
}
if opts.Trace != nil {
opts.Trace(fmt.Sprint("adding listener to ", tcpListener.Addr()))
}
tcpListeners = append(tcpListeners, tcpListener)
}
if len(tcpListeners) == 0 {
// No listeners initialized, no need to continue
return
}
listener = &httpListener{
tcpListeners: tcpListeners,
acceptCh: make(chan acceptResult, len(tcpListeners)),
}
listener.ctx, listener.ctxCanceler = context.WithCancel(ctx)
if opts.Trace != nil {
opts.Trace(fmt.Sprint("opening ", len(listener.tcpListeners), " listeners"))
}
listener.start()
return
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"sort"
"time"
"github.com/minio/madmin-go/v3"
)
// BucketTargetUsageInfo - bucket target usage info provides
// - replicated size for all objects sent to this target
// - replica size for all objects received from this target
// - replication pending size for all objects pending replication to this target
// - replication failed size for all objects failed replication to this target
// - replica pending count
// - replica failed count
type BucketTargetUsageInfo struct {
ReplicationPendingSize uint64 `json:"objectsPendingReplicationTotalSize"`
ReplicationFailedSize uint64 `json:"objectsFailedReplicationTotalSize"`
ReplicatedSize uint64 `json:"objectsReplicatedTotalSize"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationPendingCount uint64 `json:"objectsPendingReplicationCount"`
ReplicationFailedCount uint64 `json:"objectsFailedReplicationCount"`
}
// BucketUsageInfo - bucket usage info provides
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
type BucketUsageInfo struct {
Size uint64 `json:"size"`
// Following five fields suffixed with V1 are here for backward compatibility
// Total Size for objects that have not yet been replicated
ReplicationPendingSizeV1 uint64 `json:"objectsPendingReplicationTotalSize"`
	// Total size for objects that have witnessed one or more failures and will be retried
ReplicationFailedSizeV1 uint64 `json:"objectsFailedReplicationTotalSize"`
// Total size for objects that have been replicated to destination
ReplicatedSizeV1 uint64 `json:"objectsReplicatedTotalSize"`
// Total number of objects pending replication
ReplicationPendingCountV1 uint64 `json:"objectsPendingReplicationCount"`
// Total number of objects that failed replication
ReplicationFailedCountV1 uint64 `json:"objectsFailedReplicationCount"`
ObjectsCount uint64 `json:"objectsCount"`
ObjectSizesHistogram map[string]uint64 `json:"objectsSizesHistogram"`
ObjectVersionsHistogram map[string]uint64 `json:"objectsVersionsHistogram"`
VersionsCount uint64 `json:"versionsCount"`
ReplicaSize uint64 `json:"objectReplicaTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`
}
// DataUsageInfo represents data usage stats of the underlying Object API
type DataUsageInfo struct {
// LastUpdate is the timestamp of when the data usage info was last updated.
// This does not indicate a full scan.
LastUpdate time.Time `json:"lastUpdate"`
// Objects total count across all buckets
ObjectsTotalCount uint64 `json:"objectsCount"`
	// Versions total count across all buckets
VersionsTotalCount uint64 `json:"versionsCount"`
// Objects total size across all buckets
ObjectsTotalSize uint64 `json:"objectsTotalSize"`
ReplicationInfo map[string]BucketTargetUsageInfo `json:"objectsReplicationInfo"`
// Total number of buckets in this cluster
BucketsCount uint64 `json:"bucketsCount"`
// Buckets usage info provides following information across all buckets
// - total size of the bucket
// - total objects in a bucket
// - object size histogram per bucket
BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`
// Deprecated kept here for backward compatibility reasons.
BucketSizes map[string]uint64 `json:"bucketsSizes"`
// TierStats contains per-tier stats of all configured remote tiers
TierStats *allTierStats `json:"tierStats,omitempty"`
}
func (dui DataUsageInfo) tierStats() []madmin.TierInfo {
if dui.TierStats == nil {
return nil
}
cfgs := globalTierConfigMgr.ListTiers()
if len(cfgs) == 0 {
return nil
}
ts := make(map[string]madmin.TierStats, len(cfgs)+1)
	infos := make([]madmin.TierInfo, 0, len(cfgs)+1)
// Add STANDARD (hot-tier)
ts[minioHotTier] = madmin.TierStats{}
infos = append(infos, madmin.TierInfo{
Name: minioHotTier,
Type: "internal",
})
// Add configured remote tiers
for _, cfg := range cfgs {
ts[cfg.Name] = madmin.TierStats{}
infos = append(infos, madmin.TierInfo{
Name: cfg.Name,
Type: cfg.Type.String(),
})
}
ts = dui.TierStats.adminStats(ts)
for i := range infos {
info := infos[i]
infos[i].Stats = ts[info.Name]
}
sort.Slice(infos, func(i, j int) bool {
if infos[i].Type == "internal" {
return true
}
if infos[j].Type == "internal" {
return false
}
return infos[i].Name < infos[j].Name
})
return infos
}
func (dui DataUsageInfo) tierMetrics() (metrics []Metric) {
if dui.TierStats == nil {
return nil
}
// e.g minio_cluster_ilm_transitioned_bytes{tier="S3TIER-1"}=136314880
// minio_cluster_ilm_transitioned_objects{tier="S3TIER-1"}=1
// minio_cluster_ilm_transitioned_versions{tier="S3TIER-1"}=3
for tier, st := range dui.TierStats.Tiers {
metrics = append(metrics, Metric{
Description: getClusterTransitionedBytesMD(),
Value: float64(st.TotalSize),
VariableLabels: map[string]string{"tier": tier},
})
metrics = append(metrics, Metric{
Description: getClusterTransitionedObjectsMD(),
Value: float64(st.NumObjects),
VariableLabels: map[string]string{"tier": tier},
})
metrics = append(metrics, Metric{
Description: getClusterTransitionedVersionsMD(),
Value: float64(st.NumVersions),
VariableLabels: map[string]string{"tier": tier},
})
}
return metrics
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"crypto/x509"
"errors"
"sort"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/ldap"
)
const (
defaultLDAPExpiry = time.Hour * 1
minLDAPExpiry time.Duration = 15 * time.Minute
maxLDAPExpiry time.Duration = 365 * 24 * time.Hour
)
// Config contains AD/LDAP server connectivity information.
type Config struct {
LDAP ldap.Config
stsExpiryDuration time.Duration // contains converted value
}
// Enabled returns if LDAP is enabled.
func (l *Config) Enabled() bool {
return l.LDAP.Enabled
}
// Clone returns a cloned copy of LDAP config.
func (l *Config) Clone() Config {
if l == nil {
return Config{}
}
cfg := Config{
LDAP: l.LDAP.Clone(),
stsExpiryDuration: l.stsExpiryDuration,
}
return cfg
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
SRVRecordName = "srv_record_name"
LookupBindDN = "lookup_bind_dn"
LookupBindPassword = "<PASSWORD>"
UserDNSearchBaseDN = "user_dn_search_base_dn"
UserDNSearchFilter = "user_dn_search_filter"
GroupSearchFilter = "group_search_filter"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
ServerStartTLS = "server_starttls"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSRVRecordName = "MINIO_IDENTITY_LDAP_SRV_RECORD_NAME"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvServerStartTLS = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
EnvLookupBindPassword = "<PASSWORD>"
)
var removedKeys = []string{
"sts_expiry",
"username_format",
"username_search_filter",
"username_search_base_dn",
"group_name_attribute",
}
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: "",
},
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: SRVRecordName,
Value: "",
},
config.KV{
Key: UserDNSearchBaseDN,
Value: "",
},
config.KV{
Key: UserDNSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
config.KV{
Key: ServerStartTLS,
Value: config.EnableOff,
},
config.KV{
Key: LookupBindDN,
Value: "",
},
config.KV{
Key: LookupBindPassword,
Value: "",
},
}
)
// Enabled returns if LDAP config is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(ServerAddr) != ""
}
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) {
l = Config{}
// Purge all removed keys first
kvs := s[config.IdentityLDAPSubSys][config.Default]
if len(kvs) > 0 {
for _, k := range removedKeys {
kvs.Delete(k)
}
s[config.IdentityLDAPSubSys][config.Default] = kvs
}
if err := s.CheckValidKeys(config.IdentityLDAPSubSys, removedKeys); err != nil {
return l, err
}
getCfgVal := func(cfgParam string) string {
// As parameters are already validated, we skip checking
// if the config param was found.
val, _, _ := s.ResolveConfigParam(config.IdentityLDAPSubSys, config.Default, cfgParam, false)
return val
}
ldapServer := getCfgVal(ServerAddr)
if ldapServer == "" {
return l, nil
}
l.LDAP = ldap.Config{
Enabled: true,
RootCAs: rootCAs,
ServerAddr: ldapServer,
SRVRecordName: getCfgVal(SRVRecordName),
}
// Parse explicitly enable=on/off flag. If not set, defaults to `true`
// because ServerAddr is set.
if v := getCfgVal(config.Enable); v != "" {
l.LDAP.Enabled, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
l.stsExpiryDuration = defaultLDAPExpiry
// LDAP connection configuration
if v := getCfgVal(ServerInsecure); v != "" {
l.LDAP.ServerInsecure, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := getCfgVal(ServerStartTLS); v != "" {
l.LDAP.ServerStartTLS, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := getCfgVal(TLSSkipVerify); v != "" {
l.LDAP.TLSSkipVerify, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
// Lookup bind user configuration
l.LDAP.LookupBindDN = getCfgVal(LookupBindDN)
l.LDAP.LookupBindPassword = getCfgVal(LookupBindPassword)
// User DN search configuration
l.LDAP.UserDNSearchFilter = getCfgVal(UserDNSearchFilter)
l.LDAP.UserDNSearchBaseDistName = getCfgVal(UserDNSearchBaseDN)
// Group search params configuration
l.LDAP.GroupSearchFilter = getCfgVal(GroupSearchFilter)
l.LDAP.GroupSearchBaseDistName = getCfgVal(GroupSearchBaseDN)
// Validate and test configuration.
valResult := l.LDAP.Validate()
if !valResult.IsOk() {
return l, valResult
}
return l, nil
}
// GetConfigList - returns a list of LDAP configurations.
func (l *Config) GetConfigList(s config.Config) ([]madmin.IDPListItem, error) {
ldapConfigs, err := s.GetAvailableTargets(config.IdentityLDAPSubSys)
if err != nil {
return nil, err
}
// For now, ldapConfigs will only have a single entry for the default
// configuration.
var res []madmin.IDPListItem
for _, cfg := range ldapConfigs {
res = append(res, madmin.IDPListItem{
Type: "ldap",
Name: cfg,
Enabled: l.Enabled(),
})
}
return res, nil
}
// ErrProviderConfigNotFound - represents a non-existing provider error.
var ErrProviderConfigNotFound = errors.New("provider configuration not found")
// GetConfigInfo - returns config details for an LDAP configuration.
func (l *Config) GetConfigInfo(s config.Config, cfgName string) ([]madmin.IDPCfgInfo, error) {
// For now only a single LDAP config is supported.
if cfgName != madmin.Default {
return nil, ErrProviderConfigNotFound
}
kvsrcs, err := s.GetResolvedConfigParams(config.IdentityLDAPSubSys, cfgName, true)
if err != nil {
return nil, err
}
res := make([]madmin.IDPCfgInfo, 0, len(kvsrcs))
for _, kvsrc := range kvsrcs {
// skip default values.
if kvsrc.Src == config.ValueSourceDef {
continue
}
res = append(res, madmin.IDPCfgInfo{
Key: kvsrc.Key,
Value: kvsrc.Value,
IsCfg: true,
IsEnv: kvsrc.Src == config.ValueSourceEnv,
})
}
// sort the structs by the key
sort.Slice(res, func(i, j int) bool {
return res[i].Key < res[j].Key
})
return res, nil
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"math/rand"
"sync"
"time"
"github.com/minio/madmin-go/v3"
)
//go:generate msgp -file=$GOFILE
// SiteResyncStatus captures current replication resync status for a target site
type SiteResyncStatus struct {
Version int `json:"version" msg:"v"`
// Overall site status
Status ResyncStatusType `json:"st" msg:"ss"`
DeplID string `json:"dId" msg:"did"`
BucketStatuses map[string]ResyncStatusType `json:"buckets" msg:"bkts"`
TotBuckets int `json:"totbuckets" msg:"tb"`
TargetReplicationResyncStatus `json:"currSt" msg:"cst"`
}
func (s *SiteResyncStatus) clone() SiteResyncStatus {
if s == nil {
return SiteResyncStatus{}
}
o := *s
o.BucketStatuses = make(map[string]ResyncStatusType, len(s.BucketStatuses))
for b, st := range s.BucketStatuses {
o.BucketStatuses[b] = st
}
return o
}
const (
siteResyncPrefix = bucketMetaPrefix + "/site-replication/resync"
)
type resyncState struct {
resyncID string
LastSaved time.Time
}
//msgp:ignore siteResyncMetrics
type siteResyncMetrics struct {
sync.RWMutex
// resyncStatus maps resync ID to resync status for peer
resyncStatus map[string]SiteResyncStatus
// map peer deployment ID to resync ID
peerResyncMap map[string]resyncState
}
func newSiteResyncMetrics(ctx context.Context) *siteResyncMetrics {
s := siteResyncMetrics{
resyncStatus: make(map[string]SiteResyncStatus),
peerResyncMap: make(map[string]resyncState),
}
go s.save(ctx)
go s.init(ctx)
return &s
}
// init site resync metrics
func (sm *siteResyncMetrics) init(ctx context.Context) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Run the site resync metrics load in a loop
for {
if err := sm.load(ctx, newObjectLayerFn()); err == nil {
<-ctx.Done()
return
}
duration := time.Duration(r.Float64() * float64(time.Second*10))
if duration < time.Second {
			// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
time.Sleep(duration)
}
}
// load resync metrics saved on disk into memory
func (sm *siteResyncMetrics) load(ctx context.Context, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
info, err := globalSiteReplicationSys.GetClusterInfo(ctx)
if err != nil {
return err
}
if !info.Enabled {
return nil
}
for _, peer := range info.Sites {
if peer.DeploymentID == globalDeploymentID {
continue
}
rs, err := loadSiteResyncMetadata(ctx, objAPI, peer.DeploymentID)
if err != nil {
return err
}
		sm.Lock()
		if _, ok := sm.peerResyncMap[peer.DeploymentID]; !ok {
			sm.peerResyncMap[peer.DeploymentID] = resyncState{resyncID: rs.ResyncID, LastSaved: time.Time{}}
			sm.resyncStatus[rs.ResyncID] = rs
		}
		// Unlock explicitly rather than defer: a deferred unlock inside this loop
		// would hold the mutex across iterations and deadlock on the next Lock.
		sm.Unlock()
}
return nil
}
func (sm *siteResyncMetrics) report(dID string) *madmin.SiteResyncMetrics {
sm.RLock()
defer sm.RUnlock()
rst, ok := sm.peerResyncMap[dID]
if !ok {
return nil
}
rs, ok := sm.resyncStatus[rst.resyncID]
if !ok {
return nil
}
m := madmin.SiteResyncMetrics{
CollectedAt: rs.LastUpdate,
StartTime: rs.StartTime,
LastUpdate: rs.LastUpdate,
ResyncStatus: rs.Status.String(),
ResyncID: rst.resyncID,
DeplID: rs.DeplID,
ReplicatedSize: rs.ReplicatedSize,
ReplicatedCount: rs.ReplicatedCount,
FailedSize: rs.FailedSize,
FailedCount: rs.FailedCount,
Bucket: rs.Bucket,
Object: rs.Object,
NumBuckets: int64(rs.TotBuckets),
}
for b, st := range rs.BucketStatuses {
if st == ResyncFailed {
m.FailedBuckets = append(m.FailedBuckets, b)
}
}
return &m
}
// save in-memory stats to disk
func (sm *siteResyncMetrics) save(ctx context.Context) {
sTimer := time.NewTimer(siteResyncSaveInterval)
defer sTimer.Stop()
for {
select {
case <-sTimer.C:
if globalSiteReplicationSys.isEnabled() {
sm.Lock()
wg := sync.WaitGroup{}
for dID, rs := range sm.peerResyncMap {
st, ok := sm.resyncStatus[rs.resyncID]
if ok {
updt := st.Status.isValid() && st.LastUpdate.After(rs.LastSaved)
if !updt {
continue
}
rs.LastSaved = UTCNow()
sm.peerResyncMap[dID] = rs
wg.Add(1)
go func() {
defer wg.Done()
saveSiteResyncMetadata(ctx, st, newObjectLayerFn())
}()
}
}
wg.Wait()
sm.Unlock()
}
sTimer.Reset(siteResyncSaveInterval)
case <-ctx.Done():
return
}
}
}
// update overall site resync state
func (sm *siteResyncMetrics) updateState(s SiteResyncStatus) {
if !globalSiteReplicationSys.isEnabled() {
return
}
sm.Lock()
defer sm.Unlock()
switch s.Status {
case ResyncStarted:
sm.peerResyncMap[s.DeplID] = resyncState{resyncID: s.ResyncID, LastSaved: time.Time{}}
sm.resyncStatus[s.ResyncID] = s
case ResyncCompleted, ResyncCanceled, ResyncFailed:
st, ok := sm.resyncStatus[s.ResyncID]
if ok {
st.LastUpdate = s.LastUpdate
st.Status = s.Status
}
sm.resyncStatus[s.ResyncID] = st
}
}
// increment SyncedBuckets count
func (sm *siteResyncMetrics) incBucket(o resyncOpts, bktStatus ResyncStatusType) {
if !globalSiteReplicationSys.isEnabled() {
return
}
sm.Lock()
defer sm.Unlock()
st, ok := sm.resyncStatus[o.resyncID]
if ok {
if st.BucketStatuses == nil {
st.BucketStatuses = map[string]ResyncStatusType{}
}
switch bktStatus {
case ResyncCompleted:
st.BucketStatuses[o.bucket] = ResyncCompleted
st.Status = siteResyncStatus(st.Status, st.BucketStatuses)
st.LastUpdate = UTCNow()
sm.resyncStatus[o.resyncID] = st
case ResyncFailed:
st.BucketStatuses[o.bucket] = ResyncFailed
st.Status = siteResyncStatus(st.Status, st.BucketStatuses)
st.LastUpdate = UTCNow()
sm.resyncStatus[o.resyncID] = st
}
}
}
// remove deleted bucket from active resync tracking
func (sm *siteResyncMetrics) deleteBucket(b string) {
if !globalSiteReplicationSys.isEnabled() {
return
}
sm.Lock()
defer sm.Unlock()
for _, rs := range sm.peerResyncMap {
st, ok := sm.resyncStatus[rs.resyncID]
if !ok {
return
}
switch st.Status {
case ResyncCompleted, ResyncFailed:
return
default:
delete(st.BucketStatuses, b)
}
}
}
// returns overall resync status from individual bucket resync status map
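// For example, with three buckets of which two completed and one failed, the
// overall status becomes ResyncFailed; as long as any bucket is still pending,
// the status remains ResyncStarted.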
func siteResyncStatus(currSt ResyncStatusType, m map[string]ResyncStatusType) ResyncStatusType {
// avoid overwriting canceled resync status
if currSt != ResyncStarted {
return currSt
}
totBuckets := len(m)
var cmpCount, failCount int
for _, st := range m {
switch st {
case ResyncCompleted:
cmpCount++
case ResyncFailed:
failCount++
}
}
if cmpCount == totBuckets {
return ResyncCompleted
}
if cmpCount+failCount == totBuckets {
return ResyncFailed
}
return ResyncStarted
}
// update resync metrics per object
func (sm *siteResyncMetrics) updateMetric(roi ReplicateObjectInfo, success bool, resyncID string) {
if !globalSiteReplicationSys.isEnabled() {
return
}
sm.Lock()
defer sm.Unlock()
s := sm.resyncStatus[resyncID]
if success {
s.ReplicatedCount++
s.ReplicatedSize += roi.Size
} else {
s.FailedCount++
s.FailedSize += roi.Size
}
s.Bucket = roi.Bucket
s.Object = roi.Name
s.LastUpdate = UTCNow()
sm.resyncStatus[resyncID] = s
}
// Status returns current in-memory resync status for this deployment
func (sm *siteResyncMetrics) status(dID string) (rs SiteResyncStatus, err error) {
sm.RLock()
defer sm.RUnlock()
if rst, ok1 := sm.peerResyncMap[dID]; ok1 {
if st, ok2 := sm.resyncStatus[rst.resyncID]; ok2 {
return st.clone(), nil
}
}
return rs, errSRNoResync
}
// Status returns latest resync status for this deployment
func (sm *siteResyncMetrics) siteStatus(ctx context.Context, objAPI ObjectLayer, dID string) (rs SiteResyncStatus, err error) {
if !globalSiteReplicationSys.isEnabled() {
return rs, errSRNotEnabled
}
// check in-memory status
rs, err = sm.status(dID)
if err == nil {
return rs, nil
}
// check disk resync status
rs, err = loadSiteResyncMetadata(ctx, objAPI, dID)
if err != nil && err == errConfigNotFound {
return rs, nil
}
return rs, err
}
<file_sep># AssumeRoleWithCertificate [](https://slack.min.io)
## Introduction
MinIO provides a custom STS API that allows authentication with client X.509 / TLS certificates.
A major advantage of certificate-based authentication compared to other STS authentication methods, like OpenID Connect or LDAP/AD, is that client authentication works without any additional/external component that must be constantly available. Therefore, certificate-based authentication may provide better availability / lower operational complexity.
The MinIO TLS STS API can be configured via MinIO's standard configuration API (i.e. using `mc admin config set/get`). Further, it can be configured via the following environment variables:
```
mc admin config set myminio identity_tls --env
KEY:
identity_tls enable X.509 TLS certificate SSO support
ARGS:
MINIO_IDENTITY_TLS_SKIP_VERIFY (on|off) trust client certificates without verification. Defaults to "off" (verify)
```
The MinIO TLS STS API is disabled by default. However, it can be *enabled* by setting environment variable:
```
export MINIO_IDENTITY_TLS_ENABLE=on
```
## Example
MinIO exposes a custom S3 STS API endpoint as `Action=AssumeRoleWithCertificate`. A client has to send an HTTP `POST` request to `https://<host>:<port>?Action=AssumeRoleWithCertificate&Version=2011-06-15`. Since the authentication and authorization happen via X.509 certificates, the client has to send the request over **TLS** and has to provide
a client certificate.
The following curl example shows how to authenticate to a MinIO server with client certificate and obtain STS access credentials.
```curl
curl -X POST --key private.key --cert public.crt "https://minio:9000?Action=AssumeRoleWithCertificate&Version=2011-06-15&DurationSeconds=3600"
```
```xml
<?xml version="1.0" encoding="UTF-8"?>
<AssumeRoleWithCertificateResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<AssumeRoleWithCertificateResult>
<Credentials>
<AccessKeyId><KEY></AccessKeyId>
    <SecretAccessKey><KEY></SecretAccessKey>
    <Expiration>2021-07-19T20:10:45Z</Expiration>
<SessionToken><KEY></SessionToken>
</Credentials>
</AssumeRoleWithCertificateResult>
<ResponseMetadata>
<RequestId>169339CD8B3A6948</RequestId>
</ResponseMetadata>
</AssumeRoleWithCertificateResponse>
```
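For programmatic access, the same request can be made from Go using only the standard library. The following is a minimal sketch that reuses the certificate, key, and endpoint from the curl example above and simply prints the raw XML response:
```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Client certificate and key used to authenticate to MinIO.
	cert, err := tls.LoadX509KeyPair("public.crt", "private.key")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		},
	}

	// Same request as the curl example: an empty POST over TLS.
	url := "https://minio:9000?Action=AssumeRoleWithCertificate&Version=2011-06-15&DurationSeconds=3600"
	resp, err := client.Post(url, "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The body contains the AssumeRoleWithCertificateResponse XML shown above.
	fmt.Println(string(body))
}
```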
## Authentication Flow
A client can request temporary S3 credentials via the STS API. It can authenticate via a client certificate and obtain an access/secret key pair as well as a session token. These credentials are associated with an S3 policy at the MinIO server.
In case of certificate-based authentication, MinIO has to map the client-provided certificate to an S3 policy. MinIO does this via the subject common name field of the X.509 certificate. So, MinIO will associate a certificate with subject `CN = foobar` with an S3 policy named `foobar`.
The following self-signed certificate is issued for `consoleAdmin`. So, MinIO would associate it with the pre-defined `consoleAdmin` policy.
```
Certificate:
Data:
Version: 3 (0x2)
Serial Number:
35:ac:60:46:ad:8d:de:18:dc:0b:f6:98:14:ee:89:e8
Signature Algorithm: ED25519
Issuer: CN = consoleAdmin
Validity
Not Before: Jul 19 15:08:44 2021 GMT
Not After : Aug 18 15:08:44 2021 GMT
Subject: CN = consoleAdmin
Subject Public Key Info:
Public Key Algorithm: ED25519
ED25519 Public-Key:
pub:
5a:91:87:b8:77:fe:d4:af:d9:c7:c7:ce:55:ae:74:
aa:f3:f1:fe:04:63:9b:cb:20:97:61:97:90:94:fa:
12:8b
X509v3 extensions:
X509v3 Key Usage: critical
Digital Signature
X509v3 Extended Key Usage:
TLS Web Client Authentication
X509v3 Basic Constraints: critical
CA:FALSE
Signature Algorithm: ED25519
7e:aa:be:ed:47:4d:b9:2f:fc:ed:7f:5a:fc:6b:c0:05:5b:f5:
a0:31:fe:86:e3:8e:3f:49:af:6d:d5:ac:c7:c4:57:47:ce:97:
7d:ab:b8:e9:75:ec:b4:39:fb:c8:cf:53:16:5b:1f:15:b6:7f:
5a:d1:35:2d:fc:31:3a:10:e7:0c
```
> Observe the `Subject: CN = consoleAdmin` field.
Also, note that the certificate has to contain the `Extended Key Usage: TLS Web Client Authentication`. Otherwise, MinIO would not accept the certificate as a client certificate.
Now, the STS certificate-based authentication happens in 4 steps:
- Client sends HTTP `POST` request over a TLS connection hitting the MinIO TLS STS API.
- MinIO verifies that the client certificate is valid.
- MinIO tries to find a policy that matches the `CN` of the client certificate.
- MinIO returns temp. S3 credentials associated to the found policy.
The returned credentials expire after a period of time that can be configured via `&DurationSeconds=3600`. By default, the STS credentials are valid for 1 hour. The minimum expiration allowed is 15 minutes.
Further, the temporary S3 credentials will never outlive the client certificate. For example, if the `MINIO_IDENTITY_TLS_STS_EXPIRY` is 7 days but the certificate itself is only valid for the next 3 days, then MinIO will return S3 credentials that are valid for 3 days only.
## Caveat
*Applications that use the S3 API directly will work fine. However, for interactive users uploading content through the browser (for example, when POSTing to a presigned URL an app generates), the browser shows a popup asking for a client certificate; the user has to manually cancel it and continue. This may be annoying, but there is no workaround for now.*
## Explore Further
- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
<file_sep>//go:build !linux
// +build !linux
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"net"
"time"
)
// CheckPortAvailability - check if given host and port is already in use.
// Note: The check method tries to listen on given port and closes it.
// It is possible to have a disconnected client in this tiny window of time.
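//
// A sketch of typical use (host and port are placeholders):
//
//	if err := CheckPortAvailability("0.0.0.0", "9000", TCPOptions{}); err != nil {
//		// the port is already in use or cannot be bound
//	}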
func CheckPortAvailability(host, port string, opts TCPOptions) (err error) {
lc := &net.ListenConfig{}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
l, err := lc.Listen(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
return err
}
// As we are able to listen on this network, the port is not in use.
// Close the listener and continue check other networks.
return l.Close()
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package fips provides functionality to configure cryptographic
// implementations compliant with FIPS 140.
//
// FIPS 140 [1] is a US standard for data processing that specifies
// requirements for cryptographic modules. Software that is "FIPS 140
// compliant" must use approved cryptographic primitives only and that
// are implemented by a FIPS 140 certified cryptographic module.
//
// So, FIPS 140 requires that a certified implementation of e.g. AES
// is used to implement more high-level cryptographic protocols.
// It does not require any specific security criteria for those
// high-level protocols. FIPS 140 focuses only on the implementation
// and usage of the most low-level cryptographic building blocks.
//
// [1]: https://en.wikipedia.org/wiki/FIPS_140
package fips
import (
"crypto/tls"
"github.com/minio/sio"
)
// Enabled indicates whether cryptographic primitives,
// like AES or SHA-256, are implemented using a FIPS 140
// certified module.
//
// If FIPS-140 is enabled no non-NIST/FIPS approved
// primitives must be used.
const Enabled = enabled
// DARECiphers returns a list of supported cipher suites
// for the DARE object encryption.
func DARECiphers() []byte {
if Enabled {
return []byte{sio.AES_256_GCM}
}
return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305}
}
// TLSCiphers returns a list of supported TLS transport
// cipher suite IDs.
//
// The list contains only ciphers that use AES-GCM or
// (non-FIPS) CHACHA20-POLY1305 and elliptic curve key
// exchange.
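//
// A sketch of how these helpers are typically wired into a TLS config
// (field values below are illustrative):
//
//	cfg := &tls.Config{
//		MinVersion:       tls.VersionTLS12,
//		CipherSuites:     TLSCiphers(),
//		CurvePreferences: TLSCurveIDs(),
//	}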
func TLSCiphers() []uint16 {
if Enabled {
return []uint16{
tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
tls.TLS_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
}
return []uint16{
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
tls.TLS_AES_128_GCM_SHA256,
tls.TLS_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // TLS 1.2
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
}
// TLSCiphersBackwardCompatible returns a list of supported
// TLS transport cipher suite IDs.
//
// In contrast to TLSCiphers, the list contains additional
// ciphers for backward compatibility. In particular, AES-CBC
// and non-ECDHE ciphers.
func TLSCiphersBackwardCompatible() []uint16 {
if Enabled {
return []uint16{
tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
tls.TLS_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 ECDHE GCM
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // TLS 1.2 ECDHE CBC
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 non-ECDHE
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
}
}
return []uint16{
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
tls.TLS_AES_128_GCM_SHA256,
tls.TLS_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, // TLS 1.2 ECDHE GCM / POLY1305
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // TLS 1.2 ECDHE CBC
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 non-ECDHE
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
}
}
// TLSCurveIDs returns a list of supported elliptic curve IDs
// in preference order.
func TLSCurveIDs() []tls.CurveID {
var curves []tls.CurveID
if !Enabled {
curves = append(curves, tls.X25519) // Only enable X25519 in non-FIPS mode
}
curves = append(curves, tls.CurveP256)
if go19 {
// With go1.19 enable P384, P521 newer constant time implementations.
curves = append(curves, tls.CurveP384, tls.CurveP521)
}
return curves
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kms
// Top level config constants for KMS
const (
EnvKMSSecretKey = "MINIO_KMS_SECRET_KEY"
EnvKMSSecretKeyFile = "MINIO_KMS_SECRET_KEY_FILE"
EnvKESEndpoint = "MINIO_KMS_KES_ENDPOINT" // One or multiple KES endpoints, separated by ','
EnvKESEnclave = "MINIO_KMS_KES_ENCLAVE" // Optional "namespace" within a KES cluster - not required for stateless KES
EnvKESKeyName = "MINIO_KMS_KES_KEY_NAME" // The default key name used for IAM data and when no key ID is specified on a bucket
EnvKESAPIKey = "MINIO_KMS_KES_API_KEY" // Access credential for KES - API keys and private key / certificate are mutually exclusive
EnvKESClientKey = "MINIO_KMS_KES_KEY_FILE" // Path to TLS private key for authenticating to KES with mTLS - usually prefer API keys
EnvKESClientPassword = "MINIO_KMS_KES_KEY_PASSWORD" // Optional password to decrypt an encrypted TLS private key
EnvKESClientCert = "MINIO_KMS_KES_CERT_FILE" // Path to TLS certificate for authenticating to KES with mTLS - usually prefer API keys
EnvKESServerCA = "MINIO_KMS_KES_CAPATH" // Path to file/directory containing CA certificates to verify the KES server certificate
)
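// Example (hedged sketch, not part of the original file): a caller could use
// these constants to decide which KMS backend is configured. The helper name
// kmsBackendFromEnv is hypothetical and only illustrates the intent.
//
//	func kmsBackendFromEnv() string {
//		if os.Getenv(EnvKMSSecretKey) != "" || os.Getenv(EnvKMSSecretKeyFile) != "" {
//			return "static-key" // single static master key
//		}
//		if os.Getenv(EnvKESEndpoint) != "" {
//			return "kes" // external KES cluster
//		}
//		return "none"
//	}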
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/lithammer/shortuuid/v4"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
//go:generate msgp -file $GOFILE -unexported
// rebalanceStats contains per-pool rebalance statistics like number of objects,
// versions and bytes rebalanced out of a pool
type rebalanceStats struct {
InitFreeSpace uint64 `json:"initFreeSpace" msg:"ifs"` // Pool free space at the start of rebalance
InitCapacity uint64 `json:"initCapacity" msg:"ic"` // Pool capacity at the start of rebalance
Buckets []string `json:"buckets" msg:"bus"` // buckets being rebalanced or to be rebalanced
RebalancedBuckets []string `json:"rebalancedBuckets" msg:"rbs"` // buckets rebalanced
Bucket string `json:"bucket" msg:"bu"` // Last rebalanced bucket
Object string `json:"object" msg:"ob"` // Last rebalanced object
NumObjects uint64 `json:"numObjects" msg:"no"` // Number of objects rebalanced
NumVersions uint64 `json:"numVersions" msg:"nv"` // Number of versions rebalanced
Bytes uint64 `json:"bytes" msg:"bs"` // Number of bytes rebalanced
Participating bool `json:"participating" msg:"par"`
Info rebalanceInfo `json:"info" msg:"inf"`
}
func (rs *rebalanceStats) update(bucket string, fi FileInfo) {
if fi.IsLatest {
rs.NumObjects++
}
rs.NumVersions++
onDiskSz := int64(0)
if !fi.Deleted {
onDiskSz = fi.Size * int64(fi.Erasure.DataBlocks+fi.Erasure.ParityBlocks) / int64(fi.Erasure.DataBlocks)
}
rs.Bytes += uint64(onDiskSz)
rs.Bucket = bucket
rs.Object = fi.Name
}
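// Worked example for the on-disk size computation above (illustrative only):
// with an 8 data / 4 parity erasure layout, a 100 MiB version contributes
// 100 MiB * (8+4)/8 = 150 MiB to rs.Bytes. Delete markers carry no data and
// therefore contribute 0 bytes.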
type rstats []*rebalanceStats
//go:generate stringer -type=rebalStatus -trimprefix=rebal $GOFILE
type rebalStatus uint8
const (
rebalNone rebalStatus = iota
rebalStarted
rebalCompleted
rebalStopped
rebalFailed
)
type rebalanceInfo struct {
StartTime time.Time `msg:"startTs"` // Time at which rebalance-start was issued
EndTime time.Time `msg:"stopTs"` // Time at which rebalance operation completed or rebalance-stop was called
Status rebalStatus `msg:"status"` // Current state of rebalance operation. One of Started|Stopped|Completed|Failed.
}
// rebalanceMeta contains information pertaining to an ongoing rebalance operation.
type rebalanceMeta struct {
cancel context.CancelFunc `msg:"-"` // to be invoked on rebalance-stop
lastRefreshedAt time.Time `msg:"-"`
StoppedAt time.Time `msg:"stopTs"` // Time when rebalance-stop was issued.
ID string `msg:"id"` // ID of the ongoing rebalance operation
PercentFreeGoal float64 `msg:"pf"` // Computed from total free space and capacity at the start of rebalance
PoolStats []*rebalanceStats `msg:"rss"` // Per-pool rebalance stats keyed by pool index
}
var errRebalanceNotStarted = errors.New("rebalance not started")
func (z *erasureServerPools) loadRebalanceMeta(ctx context.Context) error {
r := &rebalanceMeta{}
err := r.load(ctx, z.serverPools[0])
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil
}
return err
}
z.rebalMu.Lock()
z.rebalMeta = r
z.rebalMu.Unlock()
return nil
}
// initRebalanceMeta initializes rebalance metadata for a new rebalance
// operation and saves it in the object store.
func (z *erasureServerPools) initRebalanceMeta(ctx context.Context, buckets []string) (arn string, err error) {
r := &rebalanceMeta{
ID: shortuuid.New(),
PoolStats: make([]*rebalanceStats, len(z.serverPools)),
}
// Fetch disk capacity and available space.
si := z.StorageInfo(ctx)
diskStats := make([]struct {
AvailableSpace uint64
TotalSpace uint64
}, len(z.serverPools))
var totalCap, totalFree uint64
for _, disk := range si.Disks {
// Ignore invalid.
if disk.PoolIndex < 0 || len(diskStats) <= disk.PoolIndex {
// https://github.com/minio/minio/issues/16500
continue
}
totalCap += disk.TotalSpace
totalFree += disk.AvailableSpace
diskStats[disk.PoolIndex].AvailableSpace += disk.AvailableSpace
diskStats[disk.PoolIndex].TotalSpace += disk.TotalSpace
}
r.PercentFreeGoal = float64(totalFree) / float64(totalCap)
now := time.Now()
for idx := range z.serverPools {
r.PoolStats[idx] = &rebalanceStats{
Buckets: make([]string, len(buckets)),
RebalancedBuckets: make([]string, 0, len(buckets)),
InitFreeSpace: diskStats[idx].AvailableSpace,
InitCapacity: diskStats[idx].TotalSpace,
}
copy(r.PoolStats[idx].Buckets, buckets)
if pfi := float64(diskStats[idx].AvailableSpace) / float64(diskStats[idx].TotalSpace); pfi < r.PercentFreeGoal {
r.PoolStats[idx].Participating = true
r.PoolStats[idx].Info = rebalanceInfo{
StartTime: now,
Status: rebalStarted,
}
}
}
err = r.save(ctx, z.serverPools[0])
if err != nil {
return arn, err
}
z.rebalMeta = r
return r.ID, nil
}
func (z *erasureServerPools) updatePoolStats(poolIdx int, bucket string, fi FileInfo) {
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
r := z.rebalMeta
if r == nil {
return
}
r.PoolStats[poolIdx].update(bucket, fi)
}
const (
rebalMetaName = "rebalance.bin"
rebalMetaFmt = 1
rebalMetaVer = 1
)
func (z *erasureServerPools) nextRebalBucket(poolIdx int) (string, bool) {
z.rebalMu.RLock()
defer z.rebalMu.RUnlock()
r := z.rebalMeta
if r == nil {
return "", false
}
ps := r.PoolStats[poolIdx]
if ps == nil {
return "", false
}
if ps.Info.Status == rebalCompleted || !ps.Participating {
return "", false
}
if len(ps.Buckets) == 0 {
return "", false
}
return ps.Buckets[0], true
}
func (z *erasureServerPools) bucketRebalanceDone(bucket string, poolIdx int) {
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
ps := z.rebalMeta.PoolStats[poolIdx]
if ps == nil {
return
}
for i, b := range ps.Buckets {
if b == bucket {
ps.Buckets = append(ps.Buckets[:i], ps.Buckets[i+1:]...)
ps.RebalancedBuckets = append(ps.RebalancedBuckets, bucket)
break
}
}
}
func (r *rebalanceMeta) load(ctx context.Context, store objectIO) error {
return r.loadWithOpts(ctx, store, ObjectOptions{})
}
func (r *rebalanceMeta) loadWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error {
data, _, err := readConfigWithMetadata(ctx, store, rebalMetaName, opts)
if err != nil {
return err
}
if len(data) == 0 {
return nil
}
if len(data) <= 4 {
return fmt.Errorf("rebalanceMeta: no data")
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case rebalMetaFmt:
default:
return fmt.Errorf("rebalanceMeta: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case rebalMetaVer:
default:
return fmt.Errorf("rebalanceMeta: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
if _, err = r.UnmarshalMsg(data[4:]); err != nil {
return err
}
r.lastRefreshedAt = time.Now()
return nil
}
func (r *rebalanceMeta) saveWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error {
data := make([]byte, 4, r.Msgsize()+4)
// Initialize the header.
binary.LittleEndian.PutUint16(data[0:2], rebalMetaFmt)
binary.LittleEndian.PutUint16(data[2:4], rebalMetaVer)
buf, err := r.MarshalMsg(data)
if err != nil {
return err
}
return saveConfigWithOpts(ctx, store, rebalMetaName, buf, opts)
}
func (r *rebalanceMeta) save(ctx context.Context, store objectIO) error {
return r.saveWithOpts(ctx, store, ObjectOptions{})
}
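// On-disk layout of rebalance.bin as written by saveWithOpts and read back by
// loadWithOpts (illustrative summary): a 4-byte little-endian header followed
// by the msgp-encoded rebalanceMeta payload.
//
//	bytes 0..1  uint16 rebalMetaFmt (currently 1)
//	bytes 2..3  uint16 rebalMetaVer (currently 1)
//	bytes 4..   MessagePack body produced by MarshalMsg
//
// For example, building only the header:
//
//	hdr := make([]byte, 4)
//	binary.LittleEndian.PutUint16(hdr[0:2], rebalMetaFmt)
//	binary.LittleEndian.PutUint16(hdr[2:4], rebalMetaVer)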
func (z *erasureServerPools) IsRebalanceStarted() bool {
z.rebalMu.RLock()
defer z.rebalMu.RUnlock()
if r := z.rebalMeta; r != nil {
if r.StoppedAt.IsZero() {
return true
}
}
return false
}
func (z *erasureServerPools) IsPoolRebalancing(poolIndex int) bool {
z.rebalMu.RLock()
defer z.rebalMu.RUnlock()
if r := z.rebalMeta; r != nil {
if !r.StoppedAt.IsZero() {
return false
}
ps := z.rebalMeta.PoolStats[poolIndex]
return ps.Participating && ps.Info.Status == rebalStarted
}
return false
}
func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) (err error) {
doneCh := make(chan struct{})
defer close(doneCh)
// Save rebalance.bin periodically.
go func() {
// Update rebalance.bin periodically once every 5-10s, chosen randomly
// to avoid multiple pool leaders herding to update around the same
// time.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
randSleepFor := func() time.Duration {
return 5*time.Second + time.Duration(float64(5*time.Second)*r.Float64())
}
timer := time.NewTimer(randSleepFor())
defer timer.Stop()
var rebalDone bool
var traceMsg string
for {
select {
case <-doneCh:
// rebalance completed for poolIdx
now := time.Now()
z.rebalMu.Lock()
z.rebalMeta.PoolStats[poolIdx].Info.Status = rebalCompleted
z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now
z.rebalMu.Unlock()
rebalDone = true
traceMsg = fmt.Sprintf("completed at %s", now)
case <-ctx.Done():
// rebalance stopped for poolIdx
now := time.Now()
z.rebalMu.Lock()
z.rebalMeta.PoolStats[poolIdx].Info.Status = rebalStopped
z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now
z.rebalMeta.cancel = nil // remove the already used context.CancelFunc
z.rebalMu.Unlock()
rebalDone = true
traceMsg = fmt.Sprintf("stopped at %s", now)
case <-timer.C:
traceMsg = fmt.Sprintf("saved at %s", time.Now())
}
stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
err := z.saveRebalanceStats(ctx, poolIdx, rebalSaveStats)
stopFn(err)
logger.LogIf(ctx, err)
timer.Reset(randSleepFor())
if rebalDone {
return
}
}
}()
for {
select {
case <-ctx.Done():
return
default:
}
bucket, ok := z.nextRebalBucket(poolIdx)
if !ok {
// no more buckets to rebalance or target free_space/capacity reached
break
}
stopFn := globalRebalanceMetrics.log(rebalanceMetricRebalanceBucket, poolIdx, bucket)
err = z.rebalanceBucket(ctx, bucket, poolIdx)
if err != nil {
stopFn(err)
logger.LogIf(ctx, err)
return
}
stopFn(nil)
z.bucketRebalanceDone(bucket, poolIdx)
}
return err
}
func (z *erasureServerPools) checkIfRebalanceDone(poolIdx int) bool {
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
// check if enough objects have been rebalanced
r := z.rebalMeta
poolStats := r.PoolStats[poolIdx]
if poolStats.Info.Status == rebalCompleted {
return true
}
pfi := float64(poolStats.InitFreeSpace+poolStats.Bytes) / float64(poolStats.InitCapacity)
// Mark pool rebalance as done if within 5% from PercentFreeGoal.
if diff := math.Abs(pfi - r.PercentFreeGoal); diff <= 0.05 {
r.PoolStats[poolIdx].Info.Status = rebalCompleted
r.PoolStats[poolIdx].Info.EndTime = time.Now()
return true
}
return false
}
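// Worked example for the completion check above (illustrative only): with
// InitCapacity = 1024 GiB, InitFreeSpace = 200 GiB and 60 GiB rebalanced so
// far, pfi = (200+60)/1024 is about 0.254. If PercentFreeGoal is 0.28, the
// difference is roughly 0.026, which is within 0.05, so the pool is marked
// rebalCompleted.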
// rebalanceBucket rebalances objects under bucket in poolIdx pool
func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, poolIdx int) error {
ctx = logger.SetReqInfo(ctx, &logger.ReqInfo{})
vc, _ := globalBucketVersioningSys.Get(bucket)
// Check if the current bucket has a configured lifecycle policy
lc, _ := globalLifecycleSys.Get(bucket)
// Check if bucket is object locked.
lr, _ := globalBucketObjectLockSys.Get(bucket)
pool := z.serverPools[poolIdx]
const envRebalanceWorkers = "_MINIO_REBALANCE_WORKERS"
wStr := env.Get(envRebalanceWorkers, strconv.Itoa(len(pool.sets)))
workerSize, err := strconv.Atoi(wStr)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid %s value: %s err: %v, defaulting to %d", envRebalanceWorkers, wStr, err, len(pool.sets)))
workerSize = len(pool.sets)
}
workers := make(chan struct{}, workerSize)
var wg sync.WaitGroup
for _, set := range pool.sets {
set := set
disks := set.getOnlineDisks()
if len(disks) == 0 {
logger.LogIf(ctx, fmt.Errorf("no online disks found for set with endpoints %s",
set.getEndpoints()))
continue
}
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
}
versioned := vc != nil && vc.Versioned(object)
objInfo := fi.ToObjectInfo(bucket, object, versioned)
evt := evalActionFromLifecycle(ctx, *lc, lr, objInfo)
if evt.Action.Delete() {
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal)
return true
}
return false
}
rebalanceEntry := func(entry metaCacheEntry) {
defer func() {
<-workers
wg.Done()
}()
if entry.isDir() {
return
}
// rebalance on poolIdx has reached its goal
if z.checkIfRebalanceDone(poolIdx) {
return
}
fivs, err := entry.fileInfoVersions(bucket)
if err != nil {
return
}
// We need a reversed order for rebalance,
// to create the appropriate stack.
versionsSorter(fivs.Versions).reverse()
var rebalanced, expired int
for _, version := range fivs.Versions {
// Skip transitioned objects for now. TBD
if version.IsRemote() {
continue
}
// Apply lifecycle rules on the objects that are expired.
if filterLifecycle(bucket, version.Name, version) {
rebalanced++
expired++
continue
}
// Any object left with only a single delete marker does not need to
// be rebalanced; just skip it. This also covers any other versions
// that have already expired.
remainingVersions := len(fivs.Versions) - expired
if version.Deleted && remainingVersions == 1 {
rebalanced++
continue
}
versionID := version.VersionID
if versionID == "" {
versionID = nullVersionID
}
if version.Deleted {
_, err := z.DeleteObject(ctx,
bucket,
version.Name,
ObjectOptions{
Versioned: true,
VersionID: versionID,
MTime: version.ModTime,
DeleteReplication: version.ReplicationState,
DeleteMarker: true, // make sure we create a delete marker
SkipRebalancing: true, // make sure we skip the decommissioned pool
})
var failure bool
if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
failure = true
}
if !failure {
z.updatePoolStats(poolIdx, bucket, version)
rebalanced++
}
continue
}
var failure, ignore bool
for try := 0; try < 3; try++ {
// GetObjectReader.Close is called by rebalanceObject
stopFn := globalRebalanceMetrics.log(rebalanceMetricRebalanceObject, poolIdx, bucket, version.Name, version.VersionID)
gr, err := set.GetObjectNInfo(ctx,
bucket,
encodeDirObject(version.Name),
nil,
http.Header{},
ObjectOptions{
VersionID: versionID,
NoDecryption: true,
NoLock: true,
})
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
// object deleted by the application; nothing to do here, we move on.
ignore = true
stopFn(nil)
break
}
if err != nil {
failure = true
logger.LogIf(ctx, err)
stopFn(err)
continue
}
if err = z.rebalanceObject(ctx, bucket, gr); err != nil {
failure = true
logger.LogIf(ctx, err)
stopFn(err)
continue
}
stopFn(nil)
failure = false
break
}
if ignore {
continue
}
if failure {
break // break out on first error
}
z.updatePoolStats(poolIdx, bucket, version)
rebalanced++
}
// if all versions were rebalanced, we can delete the object versions.
if rebalanced == len(fivs.Versions) {
stopFn := globalRebalanceMetrics.log(rebalanceMetricRebalanceRemoveObject, poolIdx, bucket, entry.name)
_, err := set.DeleteObject(ctx,
bucket,
encodeDirObject(entry.name),
ObjectOptions{
DeletePrefix: true, // use prefix delete to delete all versions at once.
},
)
stopFn(err)
auditLogRebalance(ctx, "Rebalance:DeleteObject", bucket, entry.name, "", err)
if err != nil {
logger.LogIf(ctx, err)
}
}
}
wg.Add(1)
go func() {
defer wg.Done()
// How to resolve partial results.
resolver := metadataResolutionParams{
dirQuorum: len(disks) / 2, // make sure to capture all quorum ratios
objQuorum: len(disks) / 2, // make sure to capture all quorum ratios
bucket: bucket,
}
err := listPathRaw(ctx, listPathRawOptions{
disks: disks,
bucket: bucket,
recursive: true,
forwardTo: "",
minDisks: len(disks) / 2, // to capture all quorum ratios
reportNotFound: false,
agreed: func(entry metaCacheEntry) {
workers <- struct{}{}
wg.Add(1)
go rebalanceEntry(entry)
},
partial: func(entries metaCacheEntries, _ []error) {
entry, ok := entries.resolve(&resolver)
if ok {
workers <- struct{}{}
wg.Add(1)
go rebalanceEntry(*entry)
}
},
finished: nil,
})
logger.LogIf(ctx, err)
}()
}
wg.Wait()
return nil
}
type rebalSaveOpts uint8
const (
rebalSaveStats rebalSaveOpts = iota
rebalSaveStoppedAt
)
func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int, opts rebalSaveOpts) error {
lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
return err
}
defer lock.Unlock(lkCtx)
ctx = lkCtx.Context()
noLockOpts := ObjectOptions{NoLock: true}
r := &rebalanceMeta{}
if err := r.loadWithOpts(ctx, z.serverPools[0], noLockOpts); err != nil {
return err
}
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
switch opts {
case rebalSaveStoppedAt:
r.StoppedAt = time.Now()
case rebalSaveStats:
r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx]
}
z.rebalMeta = r
err = z.rebalMeta.saveWithOpts(ctx, z.serverPools[0], noLockOpts)
return err
}
func auditLogRebalance(ctx context.Context, apiName, bucket, object, versionID string, err error) {
errStr := ""
if err != nil {
errStr = err.Error()
}
auditLogInternal(ctx, AuditLogOptions{
Event: "rebalance",
APIName: apiName,
Bucket: bucket,
Object: object,
VersionID: versionID,
Error: errStr,
})
}
func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, gr *GetObjectReader) (err error) {
oi := gr.ObjInfo
defer func() {
gr.Close()
auditLogRebalance(ctx, "RebalanceCopyData", oi.Bucket, oi.Name, oi.VersionID, err)
}()
actualSize, err := oi.GetActualSize()
if err != nil {
return err
}
if oi.isMultipart() {
res, err := z.NewMultipartUpload(ctx, bucket, oi.Name, ObjectOptions{
VersionID: oi.VersionID,
UserDefined: oi.UserDefined,
})
if err != nil {
return fmt.Errorf("rebalanceObject: NewMultipartUpload() %w", err)
}
defer z.AbortMultipartUpload(ctx, bucket, oi.Name, res.UploadID, ObjectOptions{})
parts := make([]CompletePart, len(oi.Parts))
for i, part := range oi.Parts {
hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
if err != nil {
return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
}
pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
part.Number,
NewPutObjReader(hr),
ObjectOptions{
PreserveETag: part.ETag, // Preserve original ETag to ensure same metadata.
IndexCB: func() []byte {
return part.Index // Preserve part Index to ensure decompression works.
},
})
if err != nil {
return fmt.Errorf("rebalanceObject: PutObjectPart() %w", err)
}
parts[i] = CompletePart{
ETag: pi.ETag,
PartNumber: pi.PartNumber,
}
}
_, err = z.CompleteMultipartUpload(ctx, bucket, oi.Name, res.UploadID, parts, ObjectOptions{
MTime: oi.ModTime,
})
if err != nil {
err = fmt.Errorf("rebalanceObject: CompleteMultipartUpload() %w", err)
}
return err
}
hr, err := hash.NewReader(gr, oi.Size, "", "", actualSize)
if err != nil {
return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
}
_, err = z.PutObject(ctx,
bucket,
oi.Name,
NewPutObjReader(hr),
ObjectOptions{
VersionID: oi.VersionID,
MTime: oi.ModTime,
UserDefined: oi.UserDefined,
PreserveETag: oi.ETag, // Preserve original ETag to ensure same metadata.
IndexCB: func() []byte {
return oi.Parts[0].Index // Preserve part Index to ensure decompression works.
},
})
if err != nil {
err = fmt.Errorf("rebalanceObject: PutObject() %w", err)
}
return err
}
func (z *erasureServerPools) StartRebalance() {
z.rebalMu.Lock()
if z.rebalMeta == nil || !z.rebalMeta.StoppedAt.IsZero() { // rebalance not running, nothing to do
z.rebalMu.Unlock()
return
}
ctx, cancel := context.WithCancel(GlobalContext)
z.rebalMeta.cancel = cancel // to be used when rebalance-stop is called
z.rebalMu.Unlock()
z.rebalMu.RLock()
participants := make([]bool, len(z.rebalMeta.PoolStats))
for i, ps := range z.rebalMeta.PoolStats {
// skip pools which have completed rebalancing
if ps.Info.Status != rebalStarted {
continue
}
participants[i] = ps.Participating
}
z.rebalMu.RUnlock()
for poolIdx, doRebalance := range participants {
if !doRebalance {
continue
}
// nothing to do if this node is not the pool's first node (i.e. the pool's rebalance 'leader').
if !globalEndpoints[poolIdx].Endpoints[0].IsLocal {
continue
}
go func(idx int) {
stopfn := globalRebalanceMetrics.log(rebalanceMetricRebalanceBuckets, idx)
err := z.rebalanceBuckets(ctx, idx)
stopfn(err)
}(poolIdx)
}
}
// StopRebalance signals the rebalance goroutine running on this node (if any)
// to stop, using the context.CancelFunc(s) saved at the time of StartRebalance.
func (z *erasureServerPools) StopRebalance() error {
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
r := z.rebalMeta
if r == nil { // rebalance not running in this node, nothing to do
return nil
}
if cancel := r.cancel; cancel != nil {
// cancel != nil only on pool leaders
r.cancel = nil
cancel()
}
return nil
}
// for rebalance trace support
type rebalanceMetrics struct{}
var globalRebalanceMetrics rebalanceMetrics
//go:generate stringer -type=rebalanceMetric -trimprefix=rebalanceMetric $GOFILE
type rebalanceMetric uint8
const (
rebalanceMetricRebalanceBuckets rebalanceMetric = iota
rebalanceMetricRebalanceBucket
rebalanceMetricRebalanceObject
rebalanceMetricRebalanceRemoveObject
rebalanceMetricSaveMetadata
)
func rebalanceTrace(r rebalanceMetric, poolIdx int, startTime time.Time, duration time.Duration, err error, path string) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
return madmin.TraceInfo{
TraceType: madmin.TraceRebalance,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: fmt.Sprintf("rebalance.%s (pool-id=%d)", r.String(), poolIdx),
Duration: duration,
Path: path,
Error: errStr,
}
}
func (p *rebalanceMetrics) log(r rebalanceMetric, poolIdx int, paths ...string) func(err error) {
startTime := time.Now()
return func(err error) {
duration := time.Since(startTime)
if globalTrace.NumSubscribers(madmin.TraceRebalance) > 0 {
globalTrace.Publish(rebalanceTrace(r, poolIdx, startTime, duration, err, strings.Join(paths, " ")))
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package versioning
import (
"encoding/xml"
"io"
"strings"
"github.com/minio/pkg/wildcard"
)
// State - enabled/disabled/suspended states
// for MFA delete and versioning status.
type State string
// Various supported states
const (
Enabled State = "Enabled"
// Disabled State = "Disabled" // only used by MFA Delete, not supported yet.
Suspended State = "Suspended"
)
var (
errExcludedPrefixNotSupported = Errorf("excluded prefixes extension supported only when versioning is enabled")
errTooManyExcludedPrefixes = Errorf("too many excluded prefixes")
)
// ExcludedPrefix - holds individual prefixes excluded from being versioned.
type ExcludedPrefix struct {
Prefix string
}
// Versioning - Configuration for bucket versioning.
type Versioning struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"VersioningConfiguration"`
// MFADelete State `xml:"MFADelete,omitempty"` // not supported yet.
Status State `xml:"Status,omitempty"`
// MinIO extension - allows selective, prefix-level versioning exclusion.
// Requires versioning to be enabled
ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"`
ExcludeFolders bool `xml:",omitempty"`
}
// Validate - validates the versioning configuration
func (v Versioning) Validate() error {
// Not supported yet
// switch v.MFADelete {
// case Enabled, Disabled:
// default:
// return Errorf("unsupported MFADelete state %s", v.MFADelete)
// }
switch v.Status {
case Enabled:
const maxExcludedPrefixes = 10
if len(v.ExcludedPrefixes) > maxExcludedPrefixes {
return errTooManyExcludedPrefixes
}
case Suspended:
if len(v.ExcludedPrefixes) > 0 {
return errExcludedPrefixNotSupported
}
default:
return Errorf("unsupported Versioning status %s", v.Status)
}
return nil
}
// Enabled - returns true if versioning is enabled
func (v Versioning) Enabled() bool {
return v.Status == Enabled
}
// Versioned returns true if 'prefix' has versioning enabled or suspended.
func (v Versioning) Versioned(prefix string) bool {
return v.PrefixEnabled(prefix) || v.PrefixSuspended(prefix)
}
// PrefixEnabled - returns true if versioning is enabled at the bucket and given
// prefix, false otherwise.
func (v Versioning) PrefixEnabled(prefix string) bool {
if v.Status != Enabled {
return false
}
if prefix == "" {
return true
}
if v.ExcludeFolders && strings.HasSuffix(prefix, "/") {
return false
}
for _, sprefix := range v.ExcludedPrefixes {
// Note: all excluded prefix patterns end with `/` (See Validate)
sprefix.Prefix += "*"
if matched := wildcard.MatchSimple(sprefix.Prefix, prefix); matched {
return false
}
}
return true
}
// Suspended - returns true if versioning is suspended
func (v Versioning) Suspended() bool {
return v.Status == Suspended
}
// PrefixSuspended - returns true if versioning is suspended at the bucket level
// or suspended on the given prefix.
func (v Versioning) PrefixSuspended(prefix string) bool {
if v.Status == Suspended {
return true
}
if v.Status == Enabled {
if prefix == "" {
return false
}
if v.ExcludeFolders && strings.HasSuffix(prefix, "/") {
return true
}
for _, sprefix := range v.ExcludedPrefixes {
// Note: all excluded prefix patterns end with `/` (See Validate)
sprefix.Prefix += "*"
if matched := wildcard.MatchSimple(sprefix.Prefix, prefix); matched {
return true
}
}
}
return false
}
// PrefixesExcluded returns true if v contains one or more excluded object
// prefixes or if ExcludeFolders is true.
func (v Versioning) PrefixesExcluded() bool {
return len(v.ExcludedPrefixes) > 0 || v.ExcludeFolders
}
// ParseConfig - parses data in given reader to VersioningConfiguration.
func ParseConfig(reader io.Reader) (*Versioning, error) {
var v Versioning
if err := xml.NewDecoder(reader).Decode(&v); err != nil {
return nil, err
}
if err := v.Validate(); err != nil {
return nil, err
}
return &v, nil
}
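// Example (hedged sketch, not part of the original file): parsing a
// configuration that uses the MinIO ExcludedPrefixes extension. The XML below
// is illustrative; excluded prefixes are matched with a trailing wildcard as
// in PrefixEnabled and PrefixSuspended.
//
//	doc := `<VersioningConfiguration>
//	          <Status>Enabled</Status>
//	          <ExcludedPrefixes><Prefix>tmp/</Prefix></ExcludedPrefixes>
//	        </VersioningConfiguration>`
//	v, err := ParseConfig(strings.NewReader(doc))
//	if err != nil {
//		// handle malformed or invalid configuration
//	}
//	_ = v.PrefixEnabled("tmp/scratch.txt") // false - excluded from versioning
//	_ = v.PrefixEnabled("data/object.txt") // true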
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package init
import (
"os"
"github.com/klauspost/cpuid/v2"
)
func init() {
// All MinIO operations must be under UTC.
os.Setenv("TZ", "UTC")
// Temporary workaround for
// https://github.com/golang/go/issues/49233
// Keep until upstream has been fixed.
cpuid.CPU.Disable(cpuid.AVX512F, cpuid.AVX512BW, cpuid.AVX512CD, cpuid.AVX512DQ,
cpuid.AVX512ER, cpuid.AVX512FP16, cpuid.AVX512IFMA, cpuid.AVX512PF, cpuid.AVX512VBMI,
cpuid.AVX512VBMI2, cpuid.AVX512VL, cpuid.AVX512VNNI, cpuid.AVX512VP2INTERSECT, cpuid.AVX512VPOPCNTDQ)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"path"
"sort"
"strings"
jsoniter "github.com/json-iterator/go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
)
const (
minioConfigPrefix = "config"
minioConfigBucket = minioMetaBucket + SlashSeparator + minioConfigPrefix
kvPrefix = ".kv"
// Captures all the previous SetKV operations and allows rollback.
minioConfigHistoryPrefix = minioConfigPrefix + "/history"
// MinIO configuration file.
minioConfigFile = "config.json"
)
func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
[]madmin.ConfigHistoryEntry, error,
) {
var configHistory []madmin.ConfigHistoryEntry
// List all kvs
marker := ""
for {
res, err := objAPI.ListObjects(ctx, minioMetaBucket, minioConfigHistoryPrefix, marker, "", maxObjectList)
if err != nil {
return nil, err
}
for _, obj := range res.Objects {
cfgEntry := madmin.ConfigHistoryEntry{
RestoreID: strings.TrimSuffix(path.Base(obj.Name), kvPrefix),
CreateTime: obj.ModTime, // ModTime is createTime for config history entries.
}
if withData {
data, err := readConfig(ctx, objAPI, obj.Name)
if err != nil {
// ignore history file if not readable.
continue
}
data, err = decryptData(data, obj.Name)
if err != nil {
// ignore history file that cannot be loaded.
continue
}
cfgEntry.Data = string(data)
}
configHistory = append(configHistory, cfgEntry)
count--
if count == 0 {
break
}
}
if !res.IsTruncated {
// We are done here
break
}
marker = res.NextMarker
}
sort.Slice(configHistory, func(i, j int) bool {
return configHistory[i].CreateTime.Before(configHistory[j].CreateTime)
})
return configHistory, nil
}
func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
_, err := objAPI.DeleteObject(ctx, minioMetaBucket, historyFile, ObjectOptions{
DeletePrefix: true,
})
return err
}
func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
data, err := readConfig(ctx, objAPI, historyFile)
if err != nil {
return nil, err
}
return decryptData(data, historyFile)
}
func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte) error {
uuidKV := mustGetUUID() + kvPrefix
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
if GlobalKMS != nil {
var err error
kv, err = config.EncryptBytes(GlobalKMS, kv, kms.Context{
minioMetaBucket: path.Join(minioMetaBucket, historyFile),
})
if err != nil {
return err
}
}
return saveConfig(ctx, objAPI, historyFile, kv)
}
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
data, err := json.Marshal(cfg)
if err != nil {
return err
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if GlobalKMS != nil {
data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{
minioMetaBucket: path.Join(minioMetaBucket, configFile),
})
if err != nil {
return err
}
}
return saveConfig(ctx, objAPI, configFile, data)
}
// data is optional. If nil it will be loaded from backend.
func readServerConfig(ctx context.Context, objAPI ObjectLayer, data []byte) (config.Config, error) {
srvCfg := config.New()
var err error
if len(data) == 0 {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
data, err = readConfig(ctx, objAPI, configFile)
if err != nil {
if errors.Is(err, errConfigNotFound) {
lookupConfigs(srvCfg, objAPI)
return srvCfg, nil
}
return nil, err
}
data, err = decryptData(data, configFile)
if err != nil {
lookupConfigs(srvCfg, objAPI)
return nil, err
}
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(data, &srvCfg); err != nil {
return nil, err
}
// Add any missing entries
return srvCfg.Merge(), nil
}
// ConfigSys - config system.
type ConfigSys struct{}
// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
if objAPI == nil {
return errInvalidArgument
}
return initConfig(objAPI)
}
// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
return &ConfigSys{}
}
// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) (err error) {
bootstrapTrace("load the configuration")
defer func() {
if err != nil {
bootstrapTrace(fmt.Sprintf("loading configuration failed: %v", err))
}
}()
if objAPI == nil {
return errServerNotInitialized
}
srvCfg, err := readConfigWithoutMigrate(GlobalContext, objAPI)
if err != nil {
return err
}
bootstrapTrace("lookup the configuration")
// Override any values from ENVs.
lookupConfigs(srvCfg, objAPI)
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
return nil
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package deadlineconn implements net.Conn wrapper with configured deadlines.
package deadlineconn
import (
"net"
"time"
)
// DeadlineConn - is a generic stream-oriented network connection that enforces
// configurable read/write deadlines.
type DeadlineConn struct {
net.Conn
readDeadline time.Duration // sets the read deadline on a connection.
writeDeadline time.Duration // sets the write deadline on a connection.
}
// setReadDeadline extends the read deadline on the underlying connection,
// if a read deadline is configured.
func (c *DeadlineConn) setReadDeadline() {
if c.readDeadline > 0 {
c.SetReadDeadline(time.Now().UTC().Add(c.readDeadline))
}
}
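// setWriteDeadline extends the write deadline on the underlying connection,
// if a write deadline is configured.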
func (c *DeadlineConn) setWriteDeadline() {
if c.writeDeadline > 0 {
c.SetWriteDeadline(time.Now().UTC().Add(c.writeDeadline))
}
}
// Read - reads data from the connection after refreshing the read deadline.
func (c *DeadlineConn) Read(b []byte) (n int, err error) {
c.setReadDeadline()
n, err = c.Conn.Read(b)
return n, err
}
// Write - writes data to the connection.
func (c *DeadlineConn) Write(b []byte) (n int, err error) {
c.setWriteDeadline()
n, err = c.Conn.Write(b)
return n, err
}
// WithReadDeadline sets a new read side net.Conn deadline.
func (c *DeadlineConn) WithReadDeadline(d time.Duration) *DeadlineConn {
c.readDeadline = d
return c
}
// WithWriteDeadline sets a new write side net.Conn deadline.
func (c *DeadlineConn) WithWriteDeadline(d time.Duration) *DeadlineConn {
c.writeDeadline = d
return c
}
// New - creates a new connection object wrapping net.Conn with deadlines.
func New(c net.Conn) *DeadlineConn {
return &DeadlineConn{
Conn: c,
}
}
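// Example (illustrative sketch, not part of the original file): wrapping an
// accepted connection so every Read and Write refreshes its own deadline.
// From a caller's perspective the constructor is qualified with the package
// name.
//
//	conn, err := listener.Accept()
//	if err != nil {
//		// handle accept error
//	}
//	dconn := deadlineconn.New(conn).
//		WithReadDeadline(15 * time.Second).
//		WithWriteDeadline(15 * time.Second)
//	// use dconn wherever a net.Conn is expected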
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/minio/minio/internal/logger"
"github.com/tidwall/gjson"
)
const (
licUpdateCycle = 24 * time.Hour * 30
licRenewURL = "https://subnet.min.io/api/cluster/renew-license"
licRenewURLDev = "http://localhost:9000/api/cluster/renew-license"
)
// initLicenseUpdateJob starts the periodic license update job in the background.
func initLicenseUpdateJob(ctx context.Context, objAPI ObjectLayer) {
go func() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Leader node (that successfully acquires the lock inside licenceUpdaterLoop)
// will keep performing the license update. If the leader goes down for some
// reason, the lock will be released and another node will acquire it and
// take over because of this loop.
for {
licenceUpdaterLoop(ctx, objAPI)
// license update stopped for some reason.
// sleep for some time and try again.
duration := time.Duration(r.Float64() * float64(time.Hour))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
time.Sleep(duration)
}
}()
}
func licenceUpdaterLoop(ctx context.Context, objAPI ObjectLayer) {
ctx, cancel := globalLeaderLock.GetLock(ctx)
defer cancel()
licenseUpdateTimer := time.NewTimer(licUpdateCycle)
defer licenseUpdateTimer.Stop()
for {
select {
case <-ctx.Done():
return
case <-licenseUpdateTimer.C:
if globalSubnetConfig.Registered() {
performLicenseUpdate(ctx, objAPI)
}
// Reset the timer for next cycle.
licenseUpdateTimer.Reset(licUpdateCycle)
}
}
}
func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) {
// The SUBNET license renewal API renews the license only
// if required, e.g. when it is expiring soon.
url := licRenewURL
if globalIsCICD {
url = licRenewURLDev
}
resp, err := globalSubnetConfig.Post(url, nil)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("error from %s: %w", url, err))
return
}
r := gjson.Parse(resp).Get("license")
if r.Index == 0 {
logger.LogIf(ctx, fmt.Errorf("license not found in response from %s", url))
return
}
lic := r.String()
if lic == globalSubnetConfig.License {
// license hasn't changed.
return
}
kv := "subnet license=" + lic
result, err := setConfigKV(ctx, objectAPI, []byte(kv))
if err != nil {
logger.LogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err))
return
}
if result.Dynamic {
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil {
logger.LogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err))
return
}
globalNotificationSys.SignalConfigReload(result.SubSys)
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package tls
import (
"strconv"
"time"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/env"
)
const (
// EnvIdentityTLSEnabled is an environment variable that controls whether the X.509
// TLS STS API is enabled. By default, if not set, it is enabled.
EnvIdentityTLSEnabled = "MINIO_IDENTITY_TLS_ENABLE"
// EnvIdentityTLSSkipVerify is an environment variable that controls whether
// MinIO verifies the client certificate presented by the client
// when requesting temp. credentials.
// By default, MinIO always verifies the client certificate.
//
// The client certificate verification should only be skipped
// when debugging or testing a setup since it allows arbitrary
// clients to obtain temp. credentials with arbitrary policy
// permissions - including admin permissions.
EnvIdentityTLSSkipVerify = "MINIO_IDENTITY_TLS_SKIP_VERIFY"
)
// Config contains the STS TLS configuration for generating temp.
// credentials and mapping client certificates to S3 policies.
type Config struct {
Enabled bool `json:"enabled"`
// InsecureSkipVerify, if set to true, disables the client
// certificate verification. It should only be set for
// debugging or testing purposes.
InsecureSkipVerify bool `json:"skip_verify"`
}
const (
defaultExpiry time.Duration = 1 * time.Hour
minExpiry time.Duration = 15 * time.Minute
maxExpiry time.Duration = 365 * 24 * time.Hour
)
// GetExpiryDuration - returns the parsed expiry duration.
func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) {
if dsecs == "" {
return defaultExpiry, nil
}
d, err := strconv.Atoi(dsecs)
if err != nil {
return 0, auth.ErrInvalidDuration
}
dur := time.Duration(d) * time.Second
if dur < minExpiry || dur > maxExpiry {
return 0, auth.ErrInvalidDuration
}
return dur, nil
}
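// Worked example for GetExpiryDuration (illustrative only): an empty value
// yields the 1h default; "3600" yields 1h; "60" is below the 15 minute
// minimum and returns auth.ErrInvalidDuration, as does a non-numeric value
// such as "1h".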
// Lookup returns a new Config by merging the given K/V config
// system with environment variables.
func Lookup(kvs config.KVS) (Config, error) {
if err := config.CheckValidKeys(config.IdentityTLSSubSys, kvs, DefaultKVS); err != nil {
return Config{}, err
}
cfg := Config{}
var err error
v := env.Get(EnvIdentityTLSEnabled, "")
if v == "" {
return cfg, nil
}
cfg.Enabled, err = config.ParseBool(v)
if err != nil {
return Config{}, err
}
cfg.InsecureSkipVerify, err = config.ParseBool(env.Get(EnvIdentityTLSSkipVerify, kvs.Get(skipVerify)))
if err != nil {
return Config{}, err
}
return cfg, nil
}
const (
skipVerify = "skip_verify"
)
// DefaultKVS is the default K/V config system for
// the STS TLS API.
var DefaultKVS = config.KVS{
config.KV{
Key: skipVerify,
Value: "off",
},
}
// Help is the help and description for the STS API K/V configuration.
var Help = config.HelpKVS{
config.HelpKV{
Key: skipVerify,
Description: `trust client certificates without verification (default: 'off')`,
Optional: true,
Type: "on|off",
},
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lifecycle
import "testing"
func Test_NoncurrentVersionsExpiration_Validation(t *testing.T) {
testcases := []struct {
n NoncurrentVersionExpiration
err error
}{
{
n: NoncurrentVersionExpiration{
NoncurrentDays: 0,
NewerNoncurrentVersions: 0,
set: true,
},
err: errXMLNotWellFormed,
},
{
n: NoncurrentVersionExpiration{
NoncurrentDays: 90,
NewerNoncurrentVersions: 0,
set: true,
},
err: nil,
},
{
n: NoncurrentVersionExpiration{
NoncurrentDays: 90,
NewerNoncurrentVersions: 2,
set: true,
},
err: nil,
},
{
n: NoncurrentVersionExpiration{
NoncurrentDays: -1,
set: true,
},
err: errXMLNotWellFormed,
},
{
n: NoncurrentVersionExpiration{
NoncurrentDays: 90,
NewerNoncurrentVersions: -2,
set: true,
},
err: errXMLNotWellFormed,
},
// MinIO extension: supports zero NoncurrentDays when NewerNoncurrentVersions > 0
{
n: NoncurrentVersionExpiration{
NoncurrentDays: 0,
NewerNoncurrentVersions: 5,
set: true,
},
err: nil,
},
}
for i, tc := range testcases {
if got := tc.n.Validate(); got != tc.err {
t.Fatalf("%d: expected %v but got %v", i+1, tc.err, got)
}
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"log"
"math"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
)
var (
re *regexp.Regexp
goTime, less, margin time.Duration
)
func init() {
re = regexp.MustCompile(`^goroutine [0-9]+ \[[^,]+(, ([0-9]+) minutes)?\]:$`)
flag.DurationVar(&less, "less", 0, "goroutine waiting less than the specified time")
flag.DurationVar(&goTime, "time", 0, "goroutine waiting for exactly the specified time")
flag.DurationVar(&margin, "margin", 0, "margin time")
}
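// Example of goroutine dump headers matched by the regexp above (illustrative
// only): a line such as
//
//	goroutine 123 [chan receive, 50 minutes]:
//
// captures "50" as the wait time in minutes, while a header without the
// ", N minutes" suffix (e.g. "goroutine 7 [running]:") still matches but has
// no duration and is skipped by parseGoroutineType2.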
func parseGoroutineType2(path string) (map[time.Duration][]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
bf := bytes.Buffer{}
save := func(s string) {
bf.WriteString(s + "\n")
}
reset := func() {
bf.Reset()
}
ret := make(map[time.Duration][]string)
s := bufio.NewScanner(f)
s.Split(bufio.ScanLines)
var (
t time.Duration
skip, record bool
)
for s.Scan() {
line := s.Text()
switch {
case skip && line != "":
case skip && line == "":
skip = false
case record && line == "":
record = false
ret[t] = append(ret[t], bf.String())
reset()
case record:
save(line)
default:
z := re.FindStringSubmatch(line)
if len(z) == 3 {
if z[2] != "" {
a, _ := strconv.Atoi(z[2])
t = time.Duration(a) * time.Minute
save(line)
record = true
} else {
skip = true
}
}
}
}
return ret, nil
}
const helpUsage = `
At least one argument is required to run this tool.
EXAMPLE:
./pprofgoparser --time 50m --margin 1m /path/to/*-goroutines-before,debug=2.txt
`
func main() {
flag.Parse()
if len(flag.Args()) == 0 {
log.Fatal(helpUsage)
}
for _, arg := range flag.Args() {
if !strings.HasSuffix(arg, "-goroutines-before,debug=2.txt") {
continue
}
r, err := parseGoroutineType2(arg)
if err != nil {
log.Fatal(err)
}
profFName := path.Base(arg)
fmt.Println(strings.Repeat("=", len(profFName)))
fmt.Println(profFName)
fmt.Println(strings.Repeat("=", len(profFName)))
fmt.Println("")
for t, stacks := range r {
if less != 0 && t >= less {
continue
}
if goTime == 0 || math.Abs(float64(t)-float64(goTime)) <= float64(margin) {
for _, stack := range stacks {
fmt.Println(stack)
}
}
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/binary"
"math"
"sync"
"time"
"github.com/minio/minio/internal/bucket/replication"
)
func (b *BucketReplicationStats) hasReplicationUsage() bool {
for _, s := range b.Stats {
if s.hasReplicationUsage() {
return true
}
}
return false
}
// ReplicationStats holds the global in-memory replication stats
type ReplicationStats struct {
Cache map[string]*BucketReplicationStats
UsageCache map[string]*BucketReplicationStats
mostRecentStats BucketStatsMap
sync.RWMutex // mutex for Cache
ulock sync.RWMutex // mutex for UsageCache
mostRecentStatsMu sync.Mutex // mutex for mostRecentStats
}
// Delete deletes in-memory replication statistics for a bucket.
func (r *ReplicationStats) Delete(bucket string) {
if r == nil {
return
}
r.Lock()
defer r.Unlock()
delete(r.Cache, bucket)
r.ulock.Lock()
defer r.ulock.Unlock()
delete(r.UsageCache, bucket)
}
// UpdateReplicaStat updates in-memory replica statistics with new values.
func (r *ReplicationStats) UpdateReplicaStat(bucket string, n int64) {
if r == nil {
return
}
r.Lock()
defer r.Unlock()
bs, ok := r.Cache[bucket]
if !ok {
bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
}
bs.ReplicaSize += n
r.Cache[bucket] = bs
}
// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, arn string, n int64, duration time.Duration, status, prevStatus replication.StatusType, opType replication.Type) {
if r == nil {
return
}
r.Lock()
defer r.Unlock()
bs, ok := r.Cache[bucket]
if !ok {
bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
r.Cache[bucket] = bs
}
b, ok := bs.Stats[arn]
if !ok {
b = &BucketReplicationStat{}
bs.Stats[arn] = b
}
switch status {
case replication.Pending:
if opType.IsDataReplication() && prevStatus != status {
b.PendingSize += n
b.PendingCount++
}
case replication.Completed:
switch prevStatus { // adjust counters based on previous state
case replication.Pending:
b.PendingCount--
case replication.Failed:
b.FailedCount--
}
if opType.IsDataReplication() {
b.ReplicatedSize += n
switch prevStatus {
case replication.Pending:
b.PendingSize -= n
case replication.Failed:
b.FailedSize -= n
}
if duration > 0 {
b.Latency.update(n, duration)
}
}
case replication.Failed:
if opType.IsDataReplication() {
if prevStatus == replication.Pending {
b.FailedSize += n
b.FailedCount++
b.PendingSize -= n
b.PendingCount--
}
}
case replication.Replica:
if opType == replication.ObjectReplicationType {
b.ReplicaSize += n
}
}
}
// GetInitialUsage gets replication metrics available at the time of cluster initialization
func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats {
if r == nil {
return BucketReplicationStats{}
}
r.ulock.RLock()
defer r.ulock.RUnlock()
st, ok := r.UsageCache[bucket]
if !ok {
return BucketReplicationStats{}
}
return st.Clone()
}
// GetAll returns replication metrics for all buckets at once.
func (r *ReplicationStats) GetAll() map[string]BucketReplicationStats {
if r == nil {
return map[string]BucketReplicationStats{}
}
r.RLock()
defer r.RUnlock()
bucketReplicationStats := make(map[string]BucketReplicationStats, len(r.Cache))
for k, v := range r.Cache {
bucketReplicationStats[k] = v.Clone()
}
return bucketReplicationStats
}
// Get replication metrics for a bucket from this node since this node came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
if r == nil {
return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
}
r.RLock()
defer r.RUnlock()
st, ok := r.Cache[bucket]
if !ok {
return BucketReplicationStats{}
}
return st.Clone()
}
// NewReplicationStats initializes in-memory replication statistics
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
return &ReplicationStats{
Cache: make(map[string]*BucketReplicationStats),
UsageCache: make(map[string]*BucketReplicationStats),
}
}
// loadInitialReplicationMetrics loads replication metrics at cluster start from the latest
// replication stats saved in .minio.sys/buckets/replication/node-name.stats, falling back
// to the replication stats in data usage for backward compatibility.
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
m := make(map[string]*BucketReplicationStats)
if stats, err := globalReplicationPool.loadStatsFromDisk(); err == nil {
for b, st := range stats {
c := st.Clone()
m[b] = &c
}
r.ulock.Lock()
r.UsageCache = m
r.ulock.Unlock()
return
}
rTimer := time.NewTimer(time.Second * 5)
defer rTimer.Stop()
var (
dui DataUsageInfo
err error
)
outer:
for {
select {
case <-ctx.Done():
return
case <-rTimer.C:
dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
// If LastUpdate is set, data usage is available.
if err == nil {
break outer
}
rTimer.Reset(time.Second * 5)
}
}
for bucket, usage := range dui.BucketsUsage {
b := &BucketReplicationStats{
Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
}
for arn, uinfo := range usage.ReplicationInfo {
b.Stats[arn] = &BucketReplicationStat{
FailedSize: int64(uinfo.ReplicationFailedSize),
ReplicatedSize: int64(uinfo.ReplicatedSize),
ReplicaSize: int64(uinfo.ReplicaSize),
FailedCount: int64(uinfo.ReplicationFailedCount),
}
}
b.ReplicaSize += int64(usage.ReplicaSize)
if b.hasReplicationUsage() {
m[bucket] = b
}
}
r.ulock.Lock()
r.UsageCache = m
r.ulock.Unlock()
}
// serializeStats will serialize the current stats.
// Will return (nil, nil) if no data.
func (r *ReplicationStats) serializeStats() ([]byte, error) {
if r == nil {
return nil, nil
}
r.mostRecentStatsMu.Lock()
defer r.mostRecentStatsMu.Unlock()
if len(r.mostRecentStats.Stats) == 0 {
return nil, nil
}
data := make([]byte, 4, 4+r.mostRecentStats.Msgsize())
// Add the replication stats meta header.
binary.LittleEndian.PutUint16(data[0:2], replStatsMetaFormat)
binary.LittleEndian.PutUint16(data[2:4], replStatsVersion)
// Add data
return r.mostRecentStats.MarshalMsg(data)
}
func (r *ReplicationStats) getAllLatest(bucketsUsage map[string]BucketUsageInfo) (bucketsReplicationStats map[string]BucketReplicationStats) {
peerBucketStatsList := globalNotificationSys.GetClusterAllBucketStats(GlobalContext)
bucketsReplicationStats = make(map[string]BucketReplicationStats, len(bucketsUsage))
for bucket, u := range bucketsUsage {
bucketStats := make([]BucketStats, len(peerBucketStatsList))
for i, peerBucketStats := range peerBucketStatsList {
bucketStat, ok := peerBucketStats.Stats[bucket]
if !ok {
continue
}
bucketStats[i] = bucketStat
}
bucketsReplicationStats[bucket] = r.calculateBucketReplicationStats(bucket, u, bucketStats)
}
return bucketsReplicationStats
}
func (r *ReplicationStats) calculateBucketReplicationStats(bucket string, u BucketUsageInfo, bucketStats []BucketStats) (s BucketReplicationStats) {
if r == nil {
s = BucketReplicationStats{
Stats: make(map[string]*BucketReplicationStat),
}
return s
}
// accumulate cluster bucket stats
stats := make(map[string]*BucketReplicationStat)
var totReplicaSize int64
for _, bucketStat := range bucketStats {
totReplicaSize += bucketStat.ReplicationStats.ReplicaSize
for arn, stat := range bucketStat.ReplicationStats.Stats {
oldst := stats[arn]
if oldst == nil {
oldst = &BucketReplicationStat{}
}
stats[arn] = &BucketReplicationStat{
FailedCount: stat.FailedCount + oldst.FailedCount,
FailedSize: stat.FailedSize + oldst.FailedSize,
ReplicatedSize: stat.ReplicatedSize + oldst.ReplicatedSize,
Latency: stat.Latency.merge(oldst.Latency),
PendingCount: stat.PendingCount + oldst.PendingCount,
PendingSize: stat.PendingSize + oldst.PendingSize,
}
}
}
// add initial usage stat to cluster stats
usageStat := globalReplicationStats.GetInitialUsage(bucket)
totReplicaSize += usageStat.ReplicaSize
for arn, stat := range usageStat.Stats {
st, ok := stats[arn]
if !ok {
st = &BucketReplicationStat{}
stats[arn] = st
}
st.ReplicatedSize += stat.ReplicatedSize
st.FailedSize += stat.FailedSize
st.FailedCount += stat.FailedCount
st.PendingSize += stat.PendingSize
st.PendingCount += stat.PendingCount
}
s = BucketReplicationStats{
Stats: make(map[string]*BucketReplicationStat, len(stats)),
}
var latestTotReplicatedSize int64
for _, st := range u.ReplicationInfo {
latestTotReplicatedSize += int64(st.ReplicatedSize)
}
// normalize computed real time stats with latest usage stat
for arn, tgtstat := range stats {
st := BucketReplicationStat{}
bu, ok := u.ReplicationInfo[arn]
if !ok {
bu = BucketTargetUsageInfo{}
}
// use in memory replication stats if it is ahead of usage info.
st.ReplicatedSize = int64(bu.ReplicatedSize)
if tgtstat.ReplicatedSize >= int64(bu.ReplicatedSize) {
st.ReplicatedSize = tgtstat.ReplicatedSize
}
s.ReplicatedSize += st.ReplicatedSize
// Reset FailedSize and FailedCount to 0 for negative overflows which can
// happen since data usage picture can lag behind actual usage state at the time of cluster start
st.FailedSize = int64(math.Max(float64(tgtstat.FailedSize), 0))
st.FailedCount = int64(math.Max(float64(tgtstat.FailedCount), 0))
st.PendingSize = int64(math.Max(float64(tgtstat.PendingSize), 0))
st.PendingCount = int64(math.Max(float64(tgtstat.PendingCount), 0))
st.Latency = tgtstat.Latency
s.Stats[arn] = &st
s.FailedSize += st.FailedSize
s.FailedCount += st.FailedCount
s.PendingCount += st.PendingCount
s.PendingSize += st.PendingSize
}
// normalize overall stats
s.ReplicaSize = int64(math.Max(float64(totReplicaSize), float64(u.ReplicaSize)))
s.ReplicatedSize = int64(math.Max(float64(s.ReplicatedSize), float64(latestTotReplicatedSize)))
r.mostRecentStatsMu.Lock()
if len(r.mostRecentStats.Stats) == 0 {
r.mostRecentStats = BucketStatsMap{Stats: make(map[string]BucketStats, 1), Timestamp: UTCNow()}
}
if len(s.Stats) > 0 {
r.mostRecentStats.Stats[bucket] = BucketStats{ReplicationStats: s}
}
r.mostRecentStats.Timestamp = UTCNow()
r.mostRecentStatsMu.Unlock()
return s
}
// getLatestReplicationStats returns the most current view of replication stats for a bucket,
// merging in-memory cluster stats with data usage info from the crawler.
func (r *ReplicationStats) getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplicationStats) {
bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
return r.calculateBucketReplicationStats(bucket, u, bucketStats)
}
<file_sep>module github.com/minio/minio/docs/debugging/inspect
go 1.19
require (
github.com/klauspost/compress v1.15.9
github.com/minio/colorjson v1.0.2
github.com/minio/madmin-go/v3 v3.0.2
github.com/secure-io/sio-go v0.3.1
github.com/tinylib/msgp v1.1.8
)
require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/minio/pkg v1.1.20 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/sys v0.5.0 // indirect
)
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package audit
import (
"net/http"
"strings"
"time"
"github.com/minio/pkg/logger/message/audit"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
)
// Version - represents the current version of audit log structure.
const Version = "1"
// NewEntry - constructs an audit entry object with some fields filled
func NewEntry(deploymentID string) audit.Entry {
return audit.Entry{
Version: Version,
DeploymentID: deploymentID,
Time: time.Now().UTC(),
}
}
// ToEntry - constructs an audit entry from a http request
func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) audit.Entry {
entry := NewEntry(deploymentID)
entry.RemoteHost = handlers.GetSourceIP(r)
entry.UserAgent = r.UserAgent()
entry.ReqClaims = reqClaims
entry.ReqHost = r.Host
entry.ReqPath = r.URL.Path
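// Flatten multi-valued query parameters and headers into comma-separated strings for the audit entry.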
q := r.URL.Query()
reqQuery := make(map[string]string, len(q))
for k, v := range q {
reqQuery[k] = strings.Join(v, ",")
}
entry.ReqQuery = reqQuery
reqHeader := make(map[string]string, len(r.Header))
for k, v := range r.Header {
reqHeader[k] = strings.Join(v, ",")
}
entry.ReqHeader = reqHeader
wh := w.Header()
entry.RequestID = wh.Get(xhttp.AmzRequestID)
respHeader := make(map[string]string, len(wh))
for k, v := range wh {
respHeader[k] = strings.Join(v, ",")
}
entry.RespHeader = respHeader
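// respHeader and entry.RespHeader point at the same map, so trimming the ETag quotes below is reflected in the entry as well.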
if etag := respHeader[xhttp.ETag]; etag != "" {
respHeader[xhttp.ETag] = strings.Trim(etag, `"`)
}
return entry
}
<file_sep>//go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
)
func writeErrorResponse(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"reason": fmt.Sprintf("%v", err),
})
}
type Resp struct {
User string `json:"user"`
MaxValiditySeconds int `json:"maxValiditySeconds"`
Claims map[string]interface{} `json:"claims"`
}
var tokens map[string]Resp = map[string]Resp{
"aaa": {
User: "Alice",
MaxValiditySeconds: 3600,
Claims: map[string]interface{}{
"groups": []string{"data-science"},
},
},
"bbb": {
User: "Bart",
MaxValiditySeconds: 3600,
Claims: map[string]interface{}{
"groups": []string{"databases"},
},
},
}
func mainHandler(w http.ResponseWriter, r *http.Request) {
token := r.FormValue("token")
if token == "" {
writeErrorResponse(w, errors.New("token parameter not given"))
return
}
rsp, ok := tokens[token]
if !ok {
w.WriteHeader(http.StatusForbidden)
return
}
fmt.Printf("Allowed for token: %s user: %s\n", token, rsp.User)
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(rsp)
}
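// main starts this example authentication plugin on :8081; MinIO's identity plugin webhook
// would be pointed at this endpoint to authenticate incoming tokens.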
func main() {
http.HandleFunc("/", mainHandler)
log.Print("Listing on :8081")
log.Fatal(http.ListenAndServe(":8081", nil))
}
<file_sep>package pubsub
import (
"math"
"math/bits"
)
// Mask allows filtering by a bitset mask.
type Mask uint64
const (
// MaskAll is the mask for all entries.
MaskAll Mask = math.MaxUint64
)
// MaskFromMaskable extracts mask from an interface.
func MaskFromMaskable(m Maskable) Mask {
return Mask(m.Mask())
}
// Contains returns whether *all* flags in other are present in t.
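// For example (illustrative): Mask(0b0110).Contains(0b0010) is true, while
// Mask(0b0110).Contains(0b0011) is false because the 0b0001 flag is missing;
// Mask(0b0110).Overlaps(0b0011) would still be true since one flag is shared.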
func (t Mask) Contains(other Mask) bool {
return t&other == other
}
// Overlaps returns whether *any* flags in t overlap with other.
func (t Mask) Overlaps(other Mask) bool {
return t&other != 0
}
// SingleType returns whether t has a single type set.
func (t Mask) SingleType() bool {
return bits.OnesCount64(uint64(t)) == 1
}
// FromUint64 will set a mask to the uint64 value.
func (t *Mask) FromUint64(m uint64) {
*t = Mask(m)
}
// Merge will merge other into t.
func (t *Mask) Merge(other Mask) {
*t |= other
}
// MergeMaskable will merge other into t.
func (t *Mask) MergeMaskable(other Maskable) {
*t |= Mask(other.Mask())
}
// SetIf will add other if b is true.
func (t *Mask) SetIf(b bool, other Mask) {
if b {
*t |= other
}
}
// Mask returns the mask as a uint64.
func (t Mask) Mask() uint64 {
return uint64(t)
}
// Maskable implementations must return their mask as a 64 bit uint.
type Maskable interface {
Mask() uint64
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package store
import (
"os"
"path/filepath"
"reflect"
"strings"
"testing"
)
type TestItem struct {
Name string `json:"Name"`
Property string `json:"property"`
}
var (
// TestDir
queueDir = filepath.Join(os.TempDir(), "minio_test")
// Sample test item.
testItem = TestItem{Name: "test-item", Property: "property"}
// Ext for test item
testItemExt = ".test"
)
// Initialize the queue store.
func setUpQueueStore(directory string, limit uint64) (Store[TestItem], error) {
queueStore := NewQueueStore[TestItem](directory, limit, testItemExt)
if oErr := queueStore.Open(); oErr != nil {
return nil, oErr
}
return queueStore, nil
}
// Tear down queue store.
func tearDownQueueStore() error {
return os.RemoveAll(queueDir)
}
// TestQueueStorePut - tests for store.Put
func TestQueueStorePut(t *testing.T) {
defer func() {
if err := tearDownQueueStore(); err != nil {
t.Fatal("Failed to tear down store ", err)
}
}()
store, err := setUpQueueStore(queueDir, 100)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
// Put 100 items.
for i := 0; i < 100; i++ {
if err := store.Put(testItem); err != nil {
t.Fatal("Failed to put to queue store ", err)
}
}
// Count the items.
names, err := store.List()
if err != nil {
t.Fatal(err)
}
if len(names) != 100 {
t.Fatalf("List() Expected: 100, got %d", len(names))
}
}
// TestQueueStoreGet - tests for store.Get
func TestQueueStoreGet(t *testing.T) {
defer func() {
if err := tearDownQueueStore(); err != nil {
t.Fatal("Failed to tear down store ", err)
}
}()
store, err := setUpQueueStore(queueDir, 10)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
// Put 10 items
for i := 0; i < 10; i++ {
if err := store.Put(testItem); err != nil {
t.Fatal("Failed to put to queue store ", err)
}
}
itemKeys, err := store.List()
if err != nil {
t.Fatal(err)
}
// Get 10 items.
if len(itemKeys) == 10 {
for _, key := range itemKeys {
item, eErr := store.Get(strings.TrimSuffix(key, testItemExt))
if eErr != nil {
t.Fatal("Failed to Get the item from the queue store ", eErr)
}
if !reflect.DeepEqual(testItem, item) {
t.Fatalf("Failed to read the item: error: expected = %v, got = %v", testItem, item)
}
}
} else {
t.Fatalf("List() Expected: 10, got %d", len(itemKeys))
}
}
// TestQueueStoreDel - tests for store.Del
func TestQueueStoreDel(t *testing.T) {
defer func() {
if err := tearDownQueueStore(); err != nil {
t.Fatal("Failed to tear down store ", err)
}
}()
store, err := setUpQueueStore(queueDir, 20)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
// Put 20 items.
for i := 0; i < 20; i++ {
if err := store.Put(testItem); err != nil {
t.Fatal("Failed to put to queue store ", err)
}
}
itemKeys, err := store.List()
if err != nil {
t.Fatal(err)
}
// Remove all the items.
if len(itemKeys) == 20 {
for _, key := range itemKeys {
err := store.Del(strings.TrimSuffix(key, testItemExt))
if err != nil {
t.Fatal("queue store Del failed with ", err)
}
}
} else {
t.Fatalf("List() Expected: 20, got %d", len(itemKeys))
}
names, err := store.List()
if err != nil {
t.Fatal(err)
}
if len(names) != 0 {
t.Fatalf("List() Expected: 0, got %d", len(names))
}
}
// TestQueueStoreLimit - tests the item limit for the store.
func TestQueueStoreLimit(t *testing.T) {
defer func() {
if err := tearDownQueueStore(); err != nil {
t.Fatal("Failed to tear down store ", err)
}
}()
// The max limit is set to 5.
store, err := setUpQueueStore(queueDir, 5)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
for i := 0; i < 5; i++ {
if err := store.Put(testItem); err != nil {
t.Fatal("Failed to put to queue store ", err)
}
}
// Should not allow 6th Put.
if err := store.Put(testItem); err == nil {
t.Fatalf("Expected to fail with %s, but passes", errLimitExceeded)
}
}
// TestQueueStoreListN - tests store.List, including results after re-opening the store.
func TestQueueStoreListN(t *testing.T) {
defer func() {
if err := tearDownQueueStore(); err != nil {
t.Fatal("Failed to tear down store ", err)
}
}()
store, err := setUpQueueStore(queueDir, 10)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
for i := 0; i < 10; i++ {
if err := store.Put(testItem); err != nil {
t.Fatal("Failed to put to queue store ", err)
}
}
// Should return all the item keys in the store.
names, err := store.List()
if err != nil {
t.Fatal(err)
}
if len(names) != 10 {
t.Fatalf("List() Expected: 10, got %d", len(names))
}
// re-open
store, err = setUpQueueStore(queueDir, 10)
if err != nil {
t.Fatal("Failed to create a queue store ", err)
}
names, err = store.List()
if err != nil {
t.Fatal(err)
}
if len(names) != 10 {
t.Fatalf("List() Expected: 10, got %d", len(names))
}
if len(names) != store.Len() {
t.Fatalf("List() Expected: 10, got %d", len(names))
}
// Delete all
for _, key := range names {
err := store.Del(key)
if err != nil {
t.Fatal(err)
}
}
// Re-list
lst, err := store.List()
if len(lst) > 0 || err != nil {
t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", lst, err)
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"os"
"reflect"
"testing"
)
func Test_readFromSecret(t *testing.T) {
testCases := []struct {
content string
expectedErr bool
expectedValue string
}{
{
"value\n",
false,
"value",
},
{
" \t\n Hello, Gophers \n\t\r\n",
false,
"Hello, Gophers",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}
tmpfile.WriteString(testCase.content)
tmpfile.Sync()
tmpfile.Close()
value, err := readFromSecret(tmpfile.Name())
if err != nil && !testCase.expectedErr {
t.Error(err)
}
if err == nil && testCase.expectedErr {
t.Error(errors.New("expected error, found success"))
}
if value != testCase.expectedValue {
t.Errorf("Expected %s, got %s", testCase.expectedValue, value)
}
})
}
}
func Test_minioEnvironFromFile(t *testing.T) {
testCases := []struct {
content string
expectedErr bool
expectedEkvs []envKV
}{
{
`
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>`,
false,
[]envKV{
{
Key: "MINIO_ROOT_USER",
Value: "minio",
},
{
Key: "MINIO_ROOT_PASSWORD",
Value: "<PASSWORD>",
},
},
},
// Value with double quotes
{
`export MINIO_ROOT_USER="minio"`,
false,
[]envKV{
{
Key: "MINIO_ROOT_USER",
Value: "minio",
},
},
},
// Value with single quotes
{
`export MINIO_ROOT_USER='minio'`,
false,
[]envKV{
{
Key: "MINIO_ROOT_USER",
Value: "minio",
},
},
},
{
`
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=<PASSWORD>`,
false,
[]envKV{
{
Key: "MINIO_ROOT_USER",
Value: "minio",
},
{
Key: "MINIO_ROOT_PASSWORD",
Value: "<PASSWORD>",
},
},
},
{
`
export MINIO_ROOT_USERminio
export MINIO_ROOT_PASSWORD=<PASSWORD>`,
true,
nil,
},
{
`
# simple comment
# MINIO_ROOT_USER=minioadmin
# MINIO_ROOT_PASSWORD=<PASSWORD>
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=<PASSWORD>`,
false,
[]envKV{
{
Key: "MINIO_ROOT_USER",
Value: "minio",
},
{
Key: "MINIO_ROOT_PASSWORD",
Value: "<PASSWORD>",
},
},
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}
tmpfile.WriteString(testCase.content)
tmpfile.Sync()
tmpfile.Close()
ekvs, err := minioEnvironFromFile(tmpfile.Name())
if err != nil && !testCase.expectedErr {
t.Error(err)
}
if err == nil && testCase.expectedErr {
t.Error(errors.New("expected error, found success"))
}
if len(ekvs) != len(testCase.expectedEkvs) {
t.Errorf("expected %v keys, got %v keys", len(testCase.expectedEkvs), len(ekvs))
}
if !reflect.DeepEqual(ekvs, testCase.expectedEkvs) {
t.Errorf("expected %v, got %v", testCase.expectedEkvs, ekvs)
}
})
}
}
<file_sep>#!/bin/bash
set -ex
export MODE="$1"
export ACCESS_KEY="$2"
export SECRET_KEY="$3"
export JOB_NAME="$4"
export MINT_MODE="full"
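## Bring up the MinIO deployment described by minio-${MODE}.yaml, run the mint test suite against it,
## then tear everything down and prune Docker resources.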
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -qf dangling=true) || true
## change working directory
cd .github/workflows/mint
docker-compose -f minio-${MODE}.yaml up -d
sleep 5m
docker run --rm --net=host \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="127.0.0.1:9000" \
-e ACCESS_KEY="${ACCESS_KEY}" \
-e SECRET_KEY="${SECRET_KEY}" \
-e ENABLE_HTTPS=0 \
-e MINT_MODE="${MINT_MODE}" \
docker.io/minio/mint:edge
docker-compose -f minio-${MODE}.yaml down || true
sleep 10s
docker system prune -f || true
docker volume prune -f || true
docker volume rm $(docker volume ls -qf dangling=true) || true
## change working directory
cd ../../../
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/minio/kes-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
iampolicy "github.com/minio/pkg/iam/policy"
)
// validateAdminReq validates the request against the supplied admin actions and returns whether it is allowed.
// If any of the supplied actions are allowed it will be successful.
// If nil ObjectLayer is returned, the operation is not permitted.
// When nil ObjectLayer has been returned an error has always been sent to w.
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, actions ...iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, auth.Credentials{}
}
for _, action := range actions {
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuth(ctx, r, action, "")
switch adminAPIErr {
case ErrNone:
return objectAPI, cred
case ErrAccessDenied:
// Try another
continue
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
}
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return nil, auth.Credentials{}
}
// AdminError - is a generic error for all admin APIs.
type AdminError struct {
Code string
Message string
StatusCode int
}
func (ae AdminError) Error() string {
return ae.Message
}
func toAdminAPIErr(ctx context.Context, err error) APIError {
if err == nil {
return noError
}
var apiErr APIError
switch e := err.(type) {
case iampolicy.Error:
apiErr = APIError{
Code: "XMinioMalformedIAMPolicy",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case config.ErrConfigNotFound:
apiErr = APIError{
Code: "XMinioConfigNotFoundError",
Description: e.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case config.ErrConfigGeneric:
apiErr = APIError{
Code: "XMinioConfigError",
Description: e.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case AdminError:
apiErr = APIError{
Code: e.Code,
Description: e.Message,
HTTPStatusCode: e.StatusCode,
}
case SRError:
apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
case decomError:
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: e.Err,
HTTPStatusCode: http.StatusBadRequest,
}
default:
switch {
case errors.Is(err, errTooManyPolicies):
apiErr = APIError{
Code: "XMinioAdminInvalidRequest",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionAlreadyRunning):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionComplete):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errDecommissionRebalanceAlreadyRunning):
apiErr = APIError{
Code: "XMinioDecommissionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errRebalanceDecommissionAlreadyRunning):
apiErr = APIError{
Code: "XMinioRebalanceNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errConfigNotFound):
apiErr = APIError{
Code: "XMinioConfigError",
Description: err.Error(),
HTTPStatusCode: http.StatusNotFound,
}
case errors.Is(err, errIAMActionNotAllowed):
apiErr = APIError{
Code: "XMinioIAMActionNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusForbidden,
}
case errors.Is(err, errIAMServiceAccountNotAllowed):
apiErr = APIError{
Code: "XMinioIAMServiceAccountNotAllowed",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errIAMNotInitialized):
apiErr = APIError{
Code: "XMinioIAMNotInitialized",
Description: err.Error(),
HTTPStatusCode: http.StatusServiceUnavailable,
}
case errors.Is(err, errPolicyInUse):
apiErr = APIError{
Code: "XMinioIAMPolicyInUse",
Description: "The policy cannot be removed, as it is in use",
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errSessionPolicyTooLarge):
apiErr = APIError{
Code: "XMinioIAMServiceAccountSessionPolicyTooLarge",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, kes.ErrKeyExists):
apiErr = APIError{
Code: "XMinioKMSKeyExists",
Description: err.Error(),
HTTPStatusCode: http.StatusConflict,
}
// Tier admin API errors
case errors.Is(err, madmin.ErrTierNameEmpty):
apiErr = APIError{
Code: "XMinioAdminTierNameEmpty",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfigVersion",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, madmin.ErrTierTypeUnsupported):
apiErr = APIError{
Code: "XMinioAdminTierTypeUnsupported",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errIsTierPermError(err):
apiErr = APIError{
Code: "XMinioAdminTierInsufficientPermissions",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
default:
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
}
return apiErr
}
// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
if errors.Is(err, errErasureWriteQuorum) {
return ErrAdminConfigNoQuorum
}
return toAPIErrorCode(ctx, err)
}
// wraps export error for more context
func exportError(ctx context.Context, err error, fname, entity string) APIError {
if entity == "" {
return toAPIError(ctx, fmt.Errorf("error exporting %s with: %w", fname, err))
}
return toAPIError(ctx, fmt.Errorf("error exporting %s from %s with: %w", entity, fname, err))
}
// wraps import error for more context
func importError(ctx context.Context, err error, fname, entity string) APIError {
if entity == "" {
return toAPIError(ctx, fmt.Errorf("error importing %s with: %w", fname, err))
}
return toAPIError(ctx, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
}
// wraps import error for more context
func importErrorWithAPIErr(ctx context.Context, apiErr APIErrorCode, err error, fname, entity string) APIError {
if entity == "" {
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s with: %w", fname, err))
}
return errorCodes.ToAPIErrWithErr(apiErr, fmt.Errorf("error importing %s from %s with: %w", entity, fname, err))
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/arn"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
xldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
idplugin "github.com/minio/minio/internal/config/identity/plugin"
xtls "github.com/minio/minio/internal/config/identity/tls"
"github.com/minio/minio/internal/config/policy/opa"
polplugin "github.com/minio/minio/internal/config/policy/plugin"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
etcd "go.etcd.io/etcd/client/v3"
)
// UsersSysType - defines the type of users and groups system that is
// active on the server.
type UsersSysType string
// Types of users configured in the server.
const (
// This mode uses the internal users system in MinIO.
MinIOUsersSysType UsersSysType = "MinIOUsersSys"
// This mode uses users and groups from a configured LDAP
// server.
LDAPUsersSysType UsersSysType = "LDAPUsersSys"
)
const (
statusEnabled = "enabled"
statusDisabled = "disabled"
)
const (
embeddedPolicyType = "embedded-policy"
inheritedPolicyType = "inherited-policy"
)
// IAMSys - IAM sub-system.
type IAMSys struct {
// Need to keep them here to keep alignment - ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
// metrics
LastRefreshTimeUnixNano uint64
LastRefreshDurationMilliseconds uint64
TotalRefreshSuccesses uint64
TotalRefreshFailures uint64
sync.Mutex
iamRefreshInterval time.Duration
LDAPConfig xldap.Config // only valid if usersSysType is LDAPUsers
OpenIDConfig openid.Config // only valid if OpenID is configured
STSTLSConfig xtls.Config // only valid if STS TLS is configured
usersSysType UsersSysType
rolesMap map[arn.ARN]string
// Persistence layer for IAM subsystem
store *IAMStoreSys
// configLoaded will be closed and remain so after first load.
configLoaded chan struct{}
}
// IAMUserType represents a user type inside MinIO server
type IAMUserType int
const (
unknownIAMUserType IAMUserType = iota - 1
regUser
stsUser
svcUser
)
// LoadGroup - loads a specific group from storage, and updates the
// memberships cache. If the specified group does not exist in
// storage, it is removed from in-memory maps as well - this
// simplifies the implementation for group removal. This is called
// only via IAM notifications.
func (sys *IAMSys) LoadGroup(ctx context.Context, objAPI ObjectLayer, group string) error {
if !sys.Initialized() {
return errServerNotInitialized
}
return sys.store.GroupNotificationHandler(ctx, group)
}
// LoadPolicy - reloads a specific canned policy from backend disks or etcd.
func (sys *IAMSys) LoadPolicy(ctx context.Context, objAPI ObjectLayer, policyName string) error {
if !sys.Initialized() {
return errServerNotInitialized
}
return sys.store.PolicyNotificationHandler(ctx, policyName)
}
// LoadPolicyMapping - loads the mapped policy for a user or group
// from storage into server memory.
func (sys *IAMSys) LoadPolicyMapping(ctx context.Context, objAPI ObjectLayer, userOrGroup string, userType IAMUserType, isGroup bool) error {
if !sys.Initialized() {
return errServerNotInitialized
}
return sys.store.PolicyMappingNotificationHandler(ctx, userOrGroup, isGroup, userType)
}
// LoadUser - reloads a specific user from backend disks or etcd.
func (sys *IAMSys) LoadUser(ctx context.Context, objAPI ObjectLayer, accessKey string, userType IAMUserType) error {
if !sys.Initialized() {
return errServerNotInitialized
}
return sys.store.UserNotificationHandler(ctx, accessKey, userType)
}
// LoadServiceAccount - reloads a specific service account from backend disks or etcd.
func (sys *IAMSys) LoadServiceAccount(ctx context.Context, accessKey string) error {
if !sys.Initialized() {
return errServerNotInitialized
}
return sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
}
// initStore initializes IAM stores
func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) {
if sys.LDAPConfig.Enabled() {
sys.SetUsersSysType(LDAPUsersSysType)
}
if etcdClient == nil {
sys.store = &IAMStoreSys{newIAMObjectStore(objAPI, sys.usersSysType)}
} else {
sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)}
}
}
// Initialized checks if IAM is initialized
func (sys *IAMSys) Initialized() bool {
if sys == nil {
return false
}
sys.Lock()
defer sys.Unlock()
return sys.store != nil
}
// Load - loads all credentials, policies and policy mappings.
func (sys *IAMSys) Load(ctx context.Context) error {
loadStartTime := time.Now()
err := sys.store.LoadIAMCache(ctx)
if err != nil {
atomic.AddUint64(&sys.TotalRefreshFailures, 1)
return err
}
loadDuration := time.Since(loadStartTime)
atomic.StoreUint64(&sys.LastRefreshDurationMilliseconds, uint64(loadDuration.Milliseconds()))
atomic.StoreUint64(&sys.LastRefreshTimeUnixNano, uint64(loadStartTime.Add(loadDuration).UnixNano()))
atomic.AddUint64(&sys.TotalRefreshSuccesses, 1)
select {
case <-sys.configLoaded:
default:
close(sys.configLoaded)
}
return nil
}
// Init - initializes config system by reading entries from config/iam
func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etcd.Client, iamRefreshInterval time.Duration) {
bootstrapTrace("IAM initialization started")
globalServerConfigMu.RLock()
s := globalServerConfig
globalServerConfigMu.RUnlock()
openidConfig, err := openid.LookupConfig(s,
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}
// Initialize if LDAP is enabled
ldapConfig, err := xldap.Lookup(s, globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err))
}
stsTLSConfig, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err))
}
if stsTLSConfig.InsecureSkipVerify {
logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify))
}
authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err))
}
setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg))
authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err))
}
if authZPluginCfg.URL == nil {
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err))
} else {
authZPluginCfg.URL = opaCfg.URL
authZPluginCfg.AuthToken = opaCfg.AuthToken
authZPluginCfg.Transport = opaCfg.Transport
authZPluginCfg.CloseRespFn = opaCfg.CloseRespFn
}
}
setGlobalAuthZPlugin(polplugin.New(authZPluginCfg))
sys.Lock()
defer sys.Unlock()
sys.LDAPConfig = ldapConfig
sys.OpenIDConfig = openidConfig
sys.STSTLSConfig = stsTLSConfig
sys.iamRefreshInterval = iamRefreshInterval
// Initialize IAM store
sys.initStore(objAPI, etcdClient)
retryCtx, cancel := context.WithCancel(ctx)
// Indicate to our routine to exit cleanly upon return.
defer cancel()
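// Random source used to jitter the retry sleeps below.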
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Migrate storage format if needed.
for {
// Migrate IAM configuration, if necessary.
if err := saveIAMFormat(retryCtx, sys.store); err != nil {
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err)
continue
}
logger.LogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err))
return
}
break
}
// Load IAM data from storage.
for {
if err := sys.Load(retryCtx); err != nil {
if configRetriableErrors(err) {
logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err)
time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
continue
}
logger.LogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err))
}
break
}
bootstrapTrace("finishing IAM loading")
refreshInterval := sys.iamRefreshInterval
// Set up polling for expired accounts and credentials purging.
switch {
case sys.OpenIDConfig.ProviderEnabled():
go func() {
timer := time.NewTimer(refreshInterval)
defer timer.Stop()
for {
select {
case <-timer.C:
sys.purgeExpiredCredentialsForExternalSSO(ctx)
timer.Reset(refreshInterval)
case <-ctx.Done():
return
}
}
}()
case sys.LDAPConfig.Enabled():
go func() {
timer := time.NewTimer(refreshInterval)
defer timer.Stop()
for {
select {
case <-timer.C:
sys.purgeExpiredCredentialsForLDAP(ctx)
sys.updateGroupMembershipsForLDAP(ctx)
timer.Reset(refreshInterval)
case <-ctx.Done():
return
}
}
}()
}
// Start watching changes to storage.
go sys.watch(ctx)
// Load RoleARNs
sys.rolesMap = make(map[arn.ARN]string)
// From OpenID
if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
// From AuthN plugin if enabled.
if authn := newGlobalAuthNPluginFn(); authn != nil {
riMap := authn.GetRoleInfo()
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
sys.printIAMRoles()
}
func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
// Validate that policies associated with roles are defined. If
// authZ plugin is set, role policies are just claims sent to
// the plugin and they need not exist.
//
// If some mapped policies do not exist, we print some error
// messages but continue anyway - they can be fixed in the
// running server by creating the policies after start up.
for arn, rolePolicies := range m {
specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
validPolicies, _ := sys.store.FilterPolicies(rolePolicies, "")
knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
if len(unknownPoliciesSet) > 0 {
authz := newGlobalAuthZPluginFn()
if authz == nil {
// Print a warning that some policies mapped to a role are not defined.
errMsg := fmt.Errorf(
"The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.",
unknownPoliciesSet.ToSlice(), arn.String())
logger.LogIf(ctx, errMsg)
}
}
sys.rolesMap[arn] = rolePolicies
}
}
// Prints IAM role ARNs.
func (sys *IAMSys) printIAMRoles() {
if len(sys.rolesMap) == 0 {
return
}
var arns []string
for arn := range sys.rolesMap {
arns = append(arns, arn.String())
}
sort.Strings(arns)
msgs := make([]string, 0, len(arns))
for _, arn := range arns {
msgs = append(msgs, color.Bold(arn))
}
logger.Info(fmt.Sprintf("%s %s", color.Blue("IAM Roles:"), strings.Join(msgs, " ")))
}
// HasWatcher - returns if the IAM system has a watcher to be notified of
// changes.
func (sys *IAMSys) HasWatcher() bool {
return sys.store.HasWatcher()
}
func (sys *IAMSys) watch(ctx context.Context) {
watcher, ok := sys.store.IAMStorageAPI.(iamStorageWatcher)
if ok {
ch := watcher.watch(ctx, iamConfigPrefix)
for event := range ch {
if err := sys.loadWatchedEvent(ctx, event); err != nil {
// we simply log errors
logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err))
}
}
return
}
var maxRefreshDurationSecondsForLog float64 = 10
// Load all items periodically
timer := time.NewTimer(sys.iamRefreshInterval)
defer timer.Stop()
for {
select {
case <-timer.C:
refreshStart := time.Now()
if err := sys.Load(ctx); err != nil {
logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err))
} else {
took := time.Since(refreshStart).Seconds()
if took > maxRefreshDurationSecondsForLog {
// Log if we took a lot of time to load.
logger.Info("IAM refresh took %.2fs", took)
}
}
timer.Reset(sys.iamRefreshInterval)
case <-ctx.Done():
return
}
}
}
func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (err error) {
usersPrefix := strings.HasPrefix(event.keyPath, iamConfigUsersPrefix)
groupsPrefix := strings.HasPrefix(event.keyPath, iamConfigGroupsPrefix)
stsPrefix := strings.HasPrefix(event.keyPath, iamConfigSTSPrefix)
svcPrefix := strings.HasPrefix(event.keyPath, iamConfigServiceAccountsPrefix)
policyPrefix := strings.HasPrefix(event.keyPath, iamConfigPoliciesPrefix)
policyDBUsersPrefix := strings.HasPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
policyDBSTSUsersPrefix := strings.HasPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
policyDBGroupsPrefix := strings.HasPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
switch {
case usersPrefix:
accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
case stsPrefix:
accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
case svcPrefix:
accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
case groupsPrefix:
group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
err = sys.store.GroupNotificationHandler(ctx, group)
case policyPrefix:
policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
err = sys.store.PolicyNotificationHandler(ctx, policyName)
case policyDBUsersPrefix:
policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
user := strings.TrimSuffix(policyMapFile, ".json")
err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
case policyDBSTSUsersPrefix:
policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
user := strings.TrimSuffix(policyMapFile, ".json")
err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
case policyDBGroupsPrefix:
policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
user := strings.TrimSuffix(policyMapFile, ".json")
err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
}
return err
}
// HasRolePolicy - returns if a role policy is configured for IAM.
func (sys *IAMSys) HasRolePolicy() bool {
return len(sys.rolesMap) > 0
}
// GetRolePolicy - returns policies associated with a role ARN.
func (sys *IAMSys) GetRolePolicy(arnStr string) (arn.ARN, string, error) {
roleArn, err := arn.Parse(arnStr)
if err != nil {
return arn.ARN{}, "", fmt.Errorf("RoleARN parse err: %v", err)
}
rolePolicy, ok := sys.rolesMap[roleArn]
if !ok {
return arn.ARN{}, "", fmt.Errorf("RoleARN %s is not defined.", arnStr)
}
return roleArn, rolePolicy, nil
}
// DeletePolicy - deletes a canned policy from backend or etcd.
func (sys *IAMSys) DeletePolicy(ctx context.Context, policyName string, notifyPeers bool) error {
if !sys.Initialized() {
return errServerNotInitialized
}
for _, v := range iampolicy.DefaultPolicies {
if v.Name == policyName {
if err := checkConfig(ctx, globalObjectAPI, getPolicyDocPath(policyName)); err != nil && err == errConfigNotFound {
return fmt.Errorf("inbuilt policy `%s` not allowed to be deleted", policyName)
}
}
}
err := sys.store.DeletePolicy(ctx, policyName)
if err != nil {
return err
}
if !notifyPeers || sys.HasWatcher() {
return nil
}
// Notify all other MinIO peers to delete policy
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
return nil
}
// InfoPolicy - returns the policy definition with some metadata.
func (sys *IAMSys) InfoPolicy(policyName string) (*madmin.PolicyInfo, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
d, err := sys.store.GetPolicyDoc(policyName)
if err != nil {
return nil, err
}
pdata, err := json.Marshal(d.Policy)
if err != nil {
return nil, err
}
return &madmin.PolicyInfo{
PolicyName: policyName,
Policy: pdata,
CreateDate: d.CreateDate,
UpdateDate: d.UpdateDate,
}, nil
}
// ListPolicies - lists all canned policies.
func (sys *IAMSys) ListPolicies(ctx context.Context, bucketName string) (map[string]iampolicy.Policy, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.ListPolicies(ctx, bucketName)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// ListPolicyDocs - lists all canned policy docs.
func (sys *IAMSys) ListPolicyDocs(ctx context.Context, bucketName string) (map[string]PolicyDoc, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.ListPolicyDocs(ctx, bucketName)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// SetPolicy - sets a new named policy.
func (sys *IAMSys) SetPolicy(ctx context.Context, policyName string, p iampolicy.Policy) (time.Time, error) {
if !sys.Initialized() {
return time.Time{}, errServerNotInitialized
}
updatedAt, err := sys.store.SetPolicy(ctx, policyName, p)
if err != nil {
return updatedAt, err
}
if !sys.HasWatcher() {
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
return updatedAt, nil
}
// DeleteUser - delete user (only for long-term users not STS users).
func (sys *IAMSys) DeleteUser(ctx context.Context, accessKey string, notifyPeers bool) error {
if !sys.Initialized() {
return errServerNotInitialized
}
if err := sys.store.DeleteUser(ctx, accessKey, regUser); err != nil {
return err
}
// Notify all other MinIO peers to delete user.
if notifyPeers && !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
return nil
}
// CurrentPolicies - returns comma separated policy string, from
// an input policy after validating if there are any current
// policies which exist on MinIO corresponding to the input.
func (sys *IAMSys) CurrentPolicies(policyName string) string {
if !sys.Initialized() {
return ""
}
policies, _ := sys.store.FilterPolicies(policyName, "")
return policies
}
func (sys *IAMSys) notifyForUser(ctx context.Context, accessKey string, isTemp bool) {
// Notify all other MinIO peers to reload user.
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadUser(accessKey, isTemp) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
}
// SetTempUser - set temporary user credentials, these credentials have an
// expiry. The permissions for these STS credentials is determined in one of the
// following ways:
//
// - RoleARN - if a role-arn is specified in the request, the STS credential's
// policy is the role's policy.
//
// - inherited from parent - this is the case for AssumeRole API, where the
// parent user is an actual real user with their own (permanent) credentials and
// policy association.
//
// - inherited from "virtual" parent - this is the case for AssumeRoleWithLDAP
// where the parent user is the DN of the actual LDAP user. The parent user
// itself cannot login, but the policy associated with them determines the base
// policy for the STS credential. The policy mapping can be updated by the
// administrator.
//
// - from `Subject.CommonName` field from the STS request for
// AssumeRoleWithCertificate. In this case, the policy for the STS credential
// has the same name as the value of this field.
//
// - from special JWT claim from STS request for AssumeRoleWithOIDC API (when
// not using RoleARN). The claim value can be a string or a list and refers to
// the names of access policies.
//
// For all except the RoleARN case, the implementation is the same - the policy
// for the STS credential is associated with a parent user. For the
// AssumeRoleWithCertificate case, the "virtual" parent user is the value of the
// `Subject.CommonName` field. For the OIDC (without RoleARN) case the "virtual"
// parent is derived as a concatenation of the `sub` and `iss` fields. The
// policies applicable to the STS credential are associated with this "virtual"
// parent.
//
// When a policyName is given to this function, the policy association is
// created and stored in the IAM store. Thus, it should NOT be given for the
// role-arn case (because the role-to-policy mapping is separately stored
// elsewhere), the AssumeRole case (because the parent user is real and their
// policy is associated via policy-set API) and the AssumeRoleWithLDAP case
// (because the policy association is made via policy-set API).
func (sys *IAMSys) SetTempUser(ctx context.Context, accessKey string, cred auth.Credentials, policyName string) (time.Time, error) {
if !sys.Initialized() {
return time.Time{}, errServerNotInitialized
}
if newGlobalAuthZPluginFn() != nil {
// If OPA is set, we do not need to set a policy mapping.
policyName = ""
}
updatedAt, err := sys.store.SetTempUser(ctx, accessKey, cred, policyName)
if err != nil {
return time.Time{}, err
}
sys.notifyForUser(ctx, cred.AccessKey, true)
return updatedAt, nil
}
// ListBucketUsers - list all users who can access this 'bucket'
func (sys *IAMSys) ListBucketUsers(ctx context.Context, bucket string) (map[string]madmin.UserInfo, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.GetBucketUsers(bucket)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// ListUsers - list all users.
func (sys *IAMSys) ListUsers(ctx context.Context) (map[string]madmin.UserInfo, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.GetUsers(), nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// ListLDAPUsers - lists LDAP users that have a mapped policy.
func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInfo, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
if sys.usersSysType != LDAPUsersSysType {
return nil, errIAMActionNotAllowed
}
select {
case <-sys.configLoaded:
ldapUsers := make(map[string]madmin.UserInfo)
for user, policy := range sys.store.GetUsersWithMappedPolicies() {
ldapUsers[user] = madmin.UserInfo{
PolicyName: policy,
Status: madmin.AccountEnabled,
}
}
return ldapUsers, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// QueryLDAPPolicyEntities - queries policy associations for LDAP users/groups/policies.
func (sys *IAMSys) QueryLDAPPolicyEntities(ctx context.Context, q madmin.PolicyEntitiesQuery) (*madmin.PolicyEntitiesResult, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
if !sys.LDAPConfig.Enabled() {
return nil, errIAMActionNotAllowed
}
select {
case <-sys.configLoaded:
pe := sys.store.ListPolicyMappings(q, sys.LDAPConfig.IsLDAPUserDN, sys.LDAPConfig.IsLDAPGroupDN)
pe.Timestamp = UTCNow()
return &pe, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// IsTempUser - returns if given key is a temporary user and parent user.
func (sys *IAMSys) IsTempUser(name string) (bool, string, error) {
if !sys.Initialized() {
return false, "", errServerNotInitialized
}
u, found := sys.store.GetUser(name)
if !found {
return false, "", errNoSuchUser
}
cred := u.Credentials
if cred.IsTemp() {
return true, cred.ParentUser, nil
}
return false, "", nil
}
// IsServiceAccount - returns if given key is a service account
func (sys *IAMSys) IsServiceAccount(name string) (bool, string, error) {
if !sys.Initialized() {
return false, "", errServerNotInitialized
}
u, found := sys.store.GetUser(name)
if !found {
return false, "", errNoSuchUser
}
cred := u.Credentials
if cred.IsServiceAccount() {
return true, cred.ParentUser, nil
}
return false, "", nil
}
// GetUserInfo - get info on a user.
func (sys *IAMSys) GetUserInfo(ctx context.Context, name string) (u madmin.UserInfo, err error) {
if !sys.Initialized() {
return u, errServerNotInitialized
}
select {
case <-sys.configLoaded:
default:
sys.store.LoadUser(ctx, name)
}
return sys.store.GetUserInfo(name)
}
// QueryPolicyEntities - queries policy associations for builtin users/groups/policies.
func (sys *IAMSys) QueryPolicyEntities(ctx context.Context, q madmin.PolicyEntitiesQuery) (*madmin.PolicyEntitiesResult, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
var userPredicate, groupPredicate func(string) bool
if sys.LDAPConfig.Enabled() {
userPredicate = func(s string) bool {
return !sys.LDAPConfig.IsLDAPUserDN(s)
}
groupPredicate = func(s string) bool {
return !sys.LDAPConfig.IsLDAPGroupDN(s)
}
}
pe := sys.store.ListPolicyMappings(q, userPredicate, groupPredicate)
pe.Timestamp = UTCNow()
return &pe, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// SetUserStatus - sets current user status, supports disabled or enabled.
func (sys *IAMSys) SetUserStatus(ctx context.Context, accessKey string, status madmin.AccountStatus) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return updatedAt, errIAMActionNotAllowed
}
updatedAt, err = sys.store.SetUserStatus(ctx, accessKey, status)
if err != nil {
return
}
sys.notifyForUser(ctx, accessKey, false)
return updatedAt, nil
}
func (sys *IAMSys) notifyForServiceAccount(ctx context.Context, accessKey string) {
// Notify all other MinIO peers to reload the service account
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
}
type newServiceAccountOpts struct {
sessionPolicy *iampolicy.Policy
accessKey string
secretKey string
name, description string
expiration *time.Time
allowSiteReplicatorAccount bool // allow creating internal service account for site-replication.
claims map[string]interface{}
}
// NewServiceAccount - create a new service account
func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, groups []string, opts newServiceAccountOpts) (auth.Credentials, time.Time, error) {
if !sys.Initialized() {
return auth.Credentials{}, time.Time{}, errServerNotInitialized
}
if parentUser == "" {
return auth.Credentials{}, time.Time{}, errInvalidArgument
}
var policyBuf []byte
if opts.sessionPolicy != nil {
err := opts.sessionPolicy.Validate()
if err != nil {
return auth.Credentials{}, time.Time{}, err
}
policyBuf, err = json.Marshal(opts.sessionPolicy)
if err != nil {
return auth.Credentials{}, time.Time{}, err
}
if len(policyBuf) > 2048 {
return auth.Credentials{}, time.Time{}, errSessionPolicyTooLarge
}
}
// If the newly requested service account has the same access key as the
// parent user, reject the operation.
if parentUser == opts.accessKey {
return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
}
if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount {
return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
}
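// Build the claims embedded in the service account credential: the parent user, plus either the
// base64-encoded session policy or a marker that the policy is inherited from the parent.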
m := make(map[string]interface{})
m[parentClaim] = parentUser
if len(policyBuf) > 0 {
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf)
m[iamPolicyClaimNameSA()] = embeddedPolicyType
} else {
m[iamPolicyClaimNameSA()] = inheritedPolicyType
}
// Add all the necessary claims for the service accounts.
for k, v := range opts.claims {
_, ok := m[k]
if !ok {
m[k] = v
}
}
var accessKey, secretKey string
var err error
if len(opts.accessKey) > 0 {
accessKey, secretKey = opts.accessKey, opts.secretKey
} else {
accessKey, secretKey, err = auth.GenerateCredentials()
if err != nil {
return auth.Credentials{}, time.Time{}, err
}
}
cred, err := auth.CreateNewCredentialsWithMetadata(accessKey, secretKey, m, secretKey)
if err != nil {
return auth.Credentials{}, time.Time{}, err
}
cred.ParentUser = parentUser
cred.Groups = groups
cred.Status = string(auth.AccountOn)
cred.Name = opts.name
cred.Description = opts.description
if opts.expiration != nil {
expirationInUTC := opts.expiration.UTC()
if err := validateSvcExpirationInUTC(expirationInUTC); err != nil {
return auth.Credentials{}, time.Time{}, err
}
cred.Expiration = expirationInUTC
}
updatedAt, err := sys.store.AddServiceAccount(ctx, cred)
if err != nil {
return auth.Credentials{}, time.Time{}, err
}
sys.notifyForServiceAccount(ctx, cred.AccessKey)
return cred, updatedAt, nil
}
type updateServiceAccountOpts struct {
sessionPolicy *iampolicy.Policy
secretKey string
status string
name, description string
expiration *time.Time
}
// UpdateServiceAccount - edit a service account
func (sys *IAMSys) UpdateServiceAccount(ctx context.Context, accessKey string, opts updateServiceAccountOpts) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
updatedAt, err = sys.store.UpdateServiceAccount(ctx, accessKey, opts)
if err != nil {
return updatedAt, err
}
sys.notifyForServiceAccount(ctx, accessKey)
return updatedAt, nil
}
// ListServiceAccounts - lists all service accounts associated with a specific user
func (sys *IAMSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]auth.Credentials, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.ListServiceAccounts(ctx, accessKey)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// ListTempAccounts - lists all temporary accounts associated with a specific user
func (sys *IAMSys) ListTempAccounts(ctx context.Context, accessKey string) ([]UserIdentity, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.ListTempAccounts(ctx, accessKey)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// GetServiceAccount - wrapper method to get information about a service account
func (sys *IAMSys) GetServiceAccount(ctx context.Context, accessKey string) (auth.Credentials, *iampolicy.Policy, error) {
sa, embeddedPolicy, err := sys.getServiceAccount(ctx, accessKey)
if err != nil {
return auth.Credentials{}, nil, err
}
// Hide secret & session keys
sa.Credentials.SecretKey = ""
sa.Credentials.SessionToken = ""
return sa.Credentials, embeddedPolicy, nil
}
func (sys *IAMSys) getServiceAccount(ctx context.Context, accessKey string) (UserIdentity, *iampolicy.Policy, error) {
sa, jwtClaims, err := sys.getAccountWithClaims(ctx, accessKey)
if err != nil {
if err == errNoSuchAccount {
return UserIdentity{}, nil, errNoSuchServiceAccount
}
return UserIdentity{}, nil, err
}
if !sa.Credentials.IsServiceAccount() {
return UserIdentity{}, nil, errNoSuchServiceAccount
}
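// If the claims indicate an embedded session policy, base64-decode and
// parse it so it can be returned alongside the credentials.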
var embeddedPolicy *iampolicy.Policy
pt, ptok := jwtClaims.Lookup(iamPolicyClaimNameSA())
sp, spok := jwtClaims.Lookup(iampolicy.SessionPolicyName)
if ptok && spok && pt == embeddedPolicyType {
policyBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
return UserIdentity{}, nil, err
}
embeddedPolicy, err = iampolicy.ParseConfig(bytes.NewReader(policyBytes))
if err != nil {
return UserIdentity{}, nil, err
}
}
return sa, embeddedPolicy, nil
}
// GetTemporaryAccount - wrapper method to get information about a temporary account
func (sys *IAMSys) GetTemporaryAccount(ctx context.Context, accessKey string) (auth.Credentials, *iampolicy.Policy, error) {
tmpAcc, embeddedPolicy, err := sys.getTempAccount(ctx, accessKey)
if err != nil {
return auth.Credentials{}, nil, err
}
// Hide secret & session keys
tmpAcc.Credentials.SecretKey = ""
tmpAcc.Credentials.SessionToken = ""
return tmpAcc.Credentials, embeddedPolicy, nil
}
func (sys *IAMSys) getTempAccount(ctx context.Context, accessKey string) (UserIdentity, *iampolicy.Policy, error) {
tmpAcc, claims, err := sys.getAccountWithClaims(ctx, accessKey)
if err != nil {
if err == errNoSuchAccount {
return UserIdentity{}, nil, errNoSuchTempAccount
}
return UserIdentity{}, nil, err
}
if !tmpAcc.Credentials.IsTemp() {
return UserIdentity{}, nil, errNoSuchTempAccount
}
var embeddedPolicy *iampolicy.Policy
sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
if spok {
policyBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
return UserIdentity{}, nil, err
}
embeddedPolicy, err = iampolicy.ParseConfig(bytes.NewReader(policyBytes))
if err != nil {
return UserIdentity{}, nil, err
}
}
return tmpAcc, embeddedPolicy, nil
}
// getAccountWithClaims - gets information about an account with claims
func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) (UserIdentity, *jwt.MapClaims, error) {
if !sys.Initialized() {
return UserIdentity{}, nil, errServerNotInitialized
}
acc, ok := sys.store.GetUser(accessKey)
if !ok {
return UserIdentity{}, nil, errNoSuchAccount
}
jwtClaims, err := extractJWTClaims(acc)
if err != nil {
return UserIdentity{}, nil, err
}
return acc, jwtClaims, nil
}
// GetClaimsForSvcAcc - gets the claims associated with the service account.
func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
if sys.usersSysType != LDAPUsersSysType {
return nil, nil
}
sa, ok := sys.store.GetUser(accessKey)
if !ok || !sa.Credentials.IsServiceAccount() {
return nil, errNoSuchServiceAccount
}
jwtClaims, err := extractJWTClaims(sa)
if err != nil {
return nil, err
}
return jwtClaims.Map(), nil
}
// DeleteServiceAccount - delete a service account
func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string, notifyPeers bool) error {
if !sys.Initialized() {
return errServerNotInitialized
}
sa, ok := sys.store.GetUser(accessKey)
if !ok || !sa.Credentials.IsServiceAccount() {
return nil
}
if err := sys.store.DeleteUser(ctx, accessKey, svcUser); err != nil {
return err
}
if notifyPeers && !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.DeleteServiceAccount(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
return nil
}
// CreateUser - creates new user credentials and policy; if the user already
// exists, they are overwritten with the new inputs.
func (sys *IAMSys) CreateUser(ctx context.Context, accessKey string, ureq madmin.AddOrUpdateUserReq) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return updatedAt, errIAMActionNotAllowed
}
if !auth.IsAccessKeyValid(accessKey) {
return updatedAt, auth.ErrInvalidAccessKeyLength
}
if !auth.IsSecretKeyValid(ureq.SecretKey) {
return updatedAt, auth.ErrInvalidSecretKeyLength
}
updatedAt, err = sys.store.AddUser(ctx, accessKey, ureq)
if err != nil {
return updatedAt, err
}
sys.notifyForUser(ctx, accessKey, false)
return updatedAt, nil
}
// SetUserSecretKey - sets user secret key
func (sys *IAMSys) SetUserSecretKey(ctx context.Context, accessKey string, secretKey string) error {
if !sys.Initialized() {
return errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return errIAMActionNotAllowed
}
if !auth.IsAccessKeyValid(accessKey) {
return auth.ErrInvalidAccessKeyLength
}
if !auth.IsSecretKeyValid(secretKey) {
return auth.ErrInvalidSecretKeyLength
}
return sys.store.UpdateUserSecretKey(ctx, accessKey, secretKey)
}
// purgeExpiredCredentialsForExternalSSO - validates whether local credentials are still valid
// by checking with the remote IDP whether the relevant users are still active and present.
func (sys *IAMSys) purgeExpiredCredentialsForExternalSSO(ctx context.Context) {
parentUsersMap := sys.store.GetAllParentUsers()
var expiredUsers []string
for parentUser, puInfo := range parentUsersMap {
// There are multiple role ARNs for a parent user only when there
// are multiple openid provider configurations with the same ID
// provider. We look up the provider associated with one of
// the roleARNs to check if the user still exists. If they don't,
// we can safely remove credentials for this parent user
// associated with any of the provider configurations.
//
// If there is no roleARN mapped to the user, the user may be
// coming from a policy claim based openid provider.
roleArns := puInfo.roleArns.ToSlice()
var roleArn string
if len(roleArns) == 0 {
logger.LogIf(GlobalContext,
fmt.Errorf("parentUser: %s had no roleArns mapped!", parentUser))
continue
}
roleArn = roleArns[0]
u, err := sys.OpenIDConfig.LookupUser(roleArn, puInfo.subClaimValue)
if err != nil {
logger.LogIf(GlobalContext, err)
continue
}
// If user is set to "disabled", we will remove them
// subsequently.
if !u.Enabled {
expiredUsers = append(expiredUsers, parentUser)
}
}
// We ignore any errors
_ = sys.store.DeleteUsers(ctx, expiredUsers)
}
// purgeExpiredCredentialsForLDAP - validates whether local credentials are still
// valid by checking with the LDAP server whether the relevant users are still present.
func (sys *IAMSys) purgeExpiredCredentialsForLDAP(ctx context.Context) {
parentUsers := sys.store.GetAllParentUsers()
var allDistNames []string
for parentUser := range parentUsers {
if !sys.LDAPConfig.IsLDAPUserDN(parentUser) {
continue
}
allDistNames = append(allDistNames, parentUser)
}
expiredUsers, err := sys.LDAPConfig.GetNonEligibleUserDistNames(allDistNames)
if err != nil {
// Log and return on error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err)
return
}
// We ignore any errors
_ = sys.store.DeleteUsers(ctx, expiredUsers)
}
// updateGroupMembershipsForLDAP - updates the list of groups associated with the credential.
func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) {
// 1. Collect all LDAP users with active creds.
allCreds := sys.store.GetSTSAndServiceAccounts()
// List of unique LDAP (parent) user DNs that have active creds
var parentUsers []string
// Map of LDAP user to list of active credential objects
parentUserToCredsMap := make(map[string][]auth.Credentials)
// DN to ldap username mapping for each LDAP user
parentUserToLDAPUsernameMap := make(map[string]string)
for _, cred := range allCreds {
if !sys.LDAPConfig.IsLDAPUserDN(cred.ParentUser) {
continue
}
// Check if this is the first time we are
// encountering this LDAP user.
if _, ok := parentUserToCredsMap[cred.ParentUser]; !ok {
// Try to find the ldapUsername for this
// parentUser by extracting JWT claims
var (
jwtClaims *jwt.MapClaims
err error
)
if cred.SessionToken == "" {
continue
}
if cred.IsServiceAccount() {
jwtClaims, err = auth.ExtractClaims(cred.SessionToken, cred.SecretKey)
if err != nil {
jwtClaims, err = auth.ExtractClaims(cred.SessionToken, globalActiveCred.SecretKey)
}
} else {
jwtClaims, err = auth.ExtractClaims(cred.SessionToken, globalActiveCred.SecretKey)
}
if err != nil {
// skip this cred - session token seems invalid
continue
}
ldapUsername, ok := jwtClaims.Lookup(ldapUserN)
if !ok {
// skip this cred - we don't have the
// username info needed
continue
}
// Collect each new cred.ParentUser into parentUsers
parentUsers = append(parentUsers, cred.ParentUser)
// Update the ldapUsernameMap
parentUserToLDAPUsernameMap[cred.ParentUser] = ldapUsername
}
parentUserToCredsMap[cred.ParentUser] = append(parentUserToCredsMap[cred.ParentUser], cred)
}
// 2. Query LDAP server for groups of the LDAP users collected.
updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUsers, parentUserToLDAPUsernameMap)
if err != nil {
// Log and return on error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err)
return
}
// 3. Update creds for those users whose groups are changed
for _, parentUser := range parentUsers {
currGroupsSet := updatedGroups[parentUser]
currGroups := currGroupsSet.ToSlice()
for _, cred := range parentUserToCredsMap[parentUser] {
gSet := set.CreateStringSet(cred.Groups...)
if gSet.Equals(currGroupsSet) {
// No change to groups memberships for this
// credential.
continue
}
cred.Groups = currGroups
if err := sys.store.UpdateUserIdentity(ctx, cred); err != nil {
// Log and continue error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err)
}
}
}
}
// GetUser - get user credentials
func (sys *IAMSys) GetUser(ctx context.Context, accessKey string) (u UserIdentity, ok bool) {
if !sys.Initialized() {
return u, false
}
fallback := false
select {
case <-sys.configLoaded:
default:
sys.store.LoadUser(ctx, accessKey)
fallback = true
}
u, ok = sys.store.GetUser(accessKey)
if !ok && !fallback {
// accessKey was not found and the IAM store is not in
// fallback mode; try reloading from the IAM store once
// more to see if the credential exists now. If it still
// doesn't, proceed to fail.
sys.store.LoadUser(ctx, accessKey)
u, ok = sys.store.GetUser(accessKey)
}
if !ok {
if accessKey == globalActiveCred.AccessKey {
return newUserIdentity(globalActiveCred), true
}
}
return u, ok && u.Credentials.IsValid()
}
// Notify all other MinIO peers to load group.
func (sys *IAMSys) notifyForGroup(ctx context.Context, group string) {
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
}
// AddUsersToGroup - adds users to a group, creating the group if
// needed. No error if user(s) already are in the group.
func (sys *IAMSys) AddUsersToGroup(ctx context.Context, group string, members []string) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return updatedAt, errIAMActionNotAllowed
}
updatedAt, err = sys.store.AddUsersToGroup(ctx, group, members)
if err != nil {
return updatedAt, err
}
sys.notifyForGroup(ctx, group)
return updatedAt, nil
}
// RemoveUsersFromGroup - remove users from group. If no users are
// given, and the group is empty, deletes the group as well.
func (sys *IAMSys) RemoveUsersFromGroup(ctx context.Context, group string, members []string) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return updatedAt, errIAMActionNotAllowed
}
updatedAt, err = sys.store.RemoveUsersFromGroup(ctx, group, members)
if err != nil {
return updatedAt, err
}
sys.notifyForGroup(ctx, group)
return updatedAt, nil
}
// SetGroupStatus - enable/disabled a group
func (sys *IAMSys) SetGroupStatus(ctx context.Context, group string, enabled bool) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
if sys.usersSysType != MinIOUsersSysType {
return updatedAt, errIAMActionNotAllowed
}
updatedAt, err = sys.store.SetGroupStatus(ctx, group, enabled)
if err != nil {
return updatedAt, err
}
sys.notifyForGroup(ctx, group)
return updatedAt, nil
}
// GetGroupDescription - builds up group description
func (sys *IAMSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err error) {
if !sys.Initialized() {
return gd, errServerNotInitialized
}
return sys.store.GetGroupDescription(group)
}
// ListGroups - lists groups.
func (sys *IAMSys) ListGroups(ctx context.Context) (r []string, err error) {
if !sys.Initialized() {
return r, errServerNotInitialized
}
select {
case <-sys.configLoaded:
return sys.store.ListGroups(ctx)
case <-ctx.Done():
return nil, ctx.Err()
}
}
// PolicyDBSet - sets a policy for a user or group in the PolicyDB. The user does not have to exist yet, since the name may refer to a virtual (federated) user.
func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userType IAMUserType, isGroup bool) (updatedAt time.Time, err error) {
if !sys.Initialized() {
return updatedAt, errServerNotInitialized
}
updatedAt, err = sys.store.PolicyDBSet(ctx, name, policy, userType, isGroup)
if err != nil {
return
}
// Notify all other MinIO peers to reload policy
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadPolicyMapping(name, userType, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
return updatedAt, nil
}
// PolicyDBUpdateBuiltin - adds or removes policies from a user or a group
// verified to be an internal IDP user.
func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
r madmin.PolicyAssociationReq,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() {
err = errServerNotInitialized
return
}
userOrGroup := r.User
var isGroup bool
if userOrGroup == "" {
isGroup = true
userOrGroup = r.Group
}
if isGroup {
_, err = sys.GetGroupDescription(userOrGroup)
if err != nil {
return
}
} else {
var isTemp bool
isTemp, _, err = sys.IsTempUser(userOrGroup)
if err != nil && err != errNoSuchUser {
return
}
if isTemp {
err = errIAMActionNotAllowed
return
}
// When the user is the root credential, adding policies
// for the root user is not allowed.
if userOrGroup == globalActiveCred.AccessKey {
err = errIAMActionNotAllowed
return
}
// Validate that user exists.
var userExists bool
_, userExists = sys.GetUser(ctx, userOrGroup)
if !userExists {
err = errNoSuchUser
return
}
}
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, userOrGroup, isGroup,
regUser, r.Policies, isAttach)
if err != nil {
return
}
// Notify all other MinIO peers to reload policy
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadPolicyMapping(userOrGroup, regUser, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: userOrGroup,
UserType: int(regUser),
IsGroup: isGroup,
Policy: strings.Join(effectivePolicies, ","),
},
UpdatedAt: updatedAt,
}))
return
}
// PolicyDBUpdateLDAP - adds or removes policies from a user or a group verified
// to be in the LDAP directory.
func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
r madmin.PolicyAssociationReq,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() {
err = errServerNotInitialized
return
}
var dn string
var isGroup bool
if r.User != "" {
dn, err = sys.LDAPConfig.DoesUsernameExist(r.User)
if err != nil {
logger.LogIf(ctx, err)
return
}
if dn == "" {
err = errNoSuchUser
return
}
isGroup = false
} else {
var exists bool
if exists, err = sys.LDAPConfig.DoesGroupDNExist(r.Group); err != nil {
logger.LogIf(ctx, err)
return
} else if !exists {
err = errNoSuchGroup
return
}
dn = r.Group
isGroup = true
}
userType := stsUser
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, dn, isGroup,
userType, r.Policies, isAttach)
if err != nil {
return
}
// Notify all other MinIO peers to reload policy
if !sys.HasWatcher() {
for _, nerr := range globalNotificationSys.LoadPolicyMapping(dn, userType, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: dn,
UserType: int(userType),
IsGroup: isGroup,
Policy: strings.Join(effectivePolicies, ","),
},
UpdatedAt: updatedAt,
}))
return
}
// PolicyDBGet - gets policy set on a user or group. If a list of groups is
// given, policies associated with them are included as well.
func (sys *IAMSys) PolicyDBGet(name string, isGroup bool, groups ...string) ([]string, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
return sys.store.PolicyDBGet(name, isGroup, groups...)
}
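// Claim key under which the already-extracted inline session policy is
// stored; used below when evaluating service-account and STS requests.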
const sessionPolicyNameExtracted = iampolicy.SessionPolicyName + "-extracted"
// IsAllowedServiceAccount - checks if the given service account is allowed to perform
// actions. The permissions of the parent user are checked first.
func (sys *IAMSys) IsAllowedServiceAccount(args iampolicy.Args, parentUser string) bool {
// Verify if the parent claim matches the parentUser.
p, ok := args.Claims[parentClaim]
if ok {
parentInClaim, ok := p.(string)
if !ok {
// Reject malformed/malicious requests.
return false
}
// The parent claim in the session token should be equal
// to the parent detected in the backend
if parentInClaim != parentUser {
return false
}
} else {
// This is needed so a malicious user cannot
// use a leaked session key of another user
// to widen its privileges.
return false
}
isOwnerDerived := parentUser == globalActiveCred.AccessKey
var err error
var svcPolicies []string
roleArn := args.GetRoleArn()
switch {
case isOwnerDerived:
// All actions are allowed by default and no policy evaluation is
// required.
case roleArn != "":
arn, err := arn.Parse(roleArn)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err))
return false
}
svcPolicies = newMappedPolicy(sys.rolesMap[arn]).toSlice()
default:
// Check policy for parent user of service account.
svcPolicies, err = sys.PolicyDBGet(parentUser, false, args.Groups...)
if err != nil {
logger.LogIf(GlobalContext, err)
return false
}
// Finally, if there is no parent policy, check if a policy claim is
// present.
if len(svcPolicies) == 0 {
policySet, _ := iampolicy.GetPoliciesFromClaims(args.Claims, iamPolicyClaimNameOpenID())
svcPolicies = policySet.ToSlice()
}
}
// Defensive code: Do not allow any operation if no policy is found.
if !isOwnerDerived && len(svcPolicies) == 0 {
return false
}
var combinedPolicy iampolicy.Policy
// Policies were found, evaluate all of them.
if !isOwnerDerived {
availablePoliciesStr, c := sys.store.FilterPolicies(strings.Join(svcPolicies, ","), "")
if availablePoliciesStr == "" {
return false
}
combinedPolicy = c
}
parentArgs := args
parentArgs.AccountName = parentUser
saPolicyClaim, ok := args.Claims[iamPolicyClaimNameSA()]
if !ok {
return false
}
saPolicyClaimStr, ok := saPolicyClaim.(string)
if !ok {
// Sub policy, if set, should be a string; reject
// malformed/malicious requests.
return false
}
if saPolicyClaimStr == inheritedPolicyType {
return isOwnerDerived || combinedPolicy.IsAllowed(parentArgs)
}
// Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok {
return false
}
spolicyStr, ok := spolicy.(string)
if !ok {
// Sub policy, if set, should be a string; reject
// malformed/malicious requests.
return false
}
// Check if policy is parseable.
subPolicy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(spolicyStr)))
if err != nil {
// Log any error in input session policy config.
logger.LogIf(GlobalContext, err)
return false
}
// This can only happen if policy was set but with an empty JSON.
if subPolicy.Version == "" && len(subPolicy.Statements) == 0 {
return isOwnerDerived || combinedPolicy.IsAllowed(parentArgs)
}
if subPolicy.Version == "" {
return false
}
return subPolicy.IsAllowed(parentArgs) && (isOwnerDerived || combinedPolicy.IsAllowed(parentArgs))
}
// IsAllowedSTS is meant for STS-based temporary credentials;
// it performs claims validation and verification in addition to
// applying policies.
func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args, parentUser string) bool {
// 1. Determine mapped policies
isOwnerDerived := parentUser == globalActiveCred.AccessKey
var policies []string
roleArn := args.GetRoleArn()
switch {
case isOwnerDerived:
// All actions are allowed by default and no policy evaluation is
// required.
case roleArn != "":
// If a roleARN is present, the role policy is applied.
arn, err := arn.Parse(roleArn)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err))
return false
}
policies = newMappedPolicy(sys.rolesMap[arn]).toSlice()
default:
// Otherwise, inherit parent user's policy
var err error
policies, err = sys.store.PolicyDBGet(parentUser, false, args.Groups...)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err))
return false
}
// Finally, if there is no parent policy, check if a policy claim is
// present in the session token.
if len(policies) == 0 {
// If there is no parent policy mapping, we fall back to
// using policy claim from JWT.
policySet, ok := args.GetPolicies(iamPolicyClaimNameOpenID())
if !ok {
// When claims are set, it should have a policy claim field.
return false
}
policies = policySet.ToSlice()
}
}
// Defensive code: Do not allow any operation if no policy is found in the session token
if !isOwnerDerived && len(policies) == 0 {
return false
}
// 2. Combine the mapped policies into a single combined policy.
var combinedPolicy iampolicy.Policy
if !isOwnerDerived {
var err error
combinedPolicy, err = sys.store.GetPolicy(strings.Join(policies, ","))
if errors.Is(err, errNoSuchPolicy) {
for _, pname := range policies {
_, err := sys.store.GetPolicy(pname)
if errors.Is(err, errNoSuchPolicy) {
// all policies presented in the claim should exist
logger.LogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID()))
return false
}
}
logger.LogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!"))
return false
}
}
// 3. If an inline session-policy is present, evaluate it.
// Now check if we have a sessionPolicy.
hasSessionPolicy, isAllowedSP := isAllowedBySessionPolicy(args)
if hasSessionPolicy {
return isAllowedSP && (isOwnerDerived || combinedPolicy.IsAllowed(args))
}
// Sub policy is not set; this is the most common case since
// subPolicy is optional, so use the inherited policies.
return isOwnerDerived || combinedPolicy.IsAllowed(args)
}
func isAllowedBySessionPolicy(args iampolicy.Args) (hasSessionPolicy bool, isAllowed bool) {
hasSessionPolicy = false
isAllowed = false
// Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok {
return
}
hasSessionPolicy = true
spolicyStr, ok := spolicy.(string)
if !ok {
// Sub policy, if set, should be a string; reject
// malformed/malicious requests.
return
}
// Check if policy is parseable.
subPolicy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(spolicyStr)))
if err != nil {
// Log any error in input session policy config.
logger.LogIf(GlobalContext, err)
return
}
// Reject a policy without a Version string value.
if subPolicy.Version == "" {
return
}
// Sub policy is set and valid.
return hasSessionPolicy, subPolicy.IsAllowed(args)
}
// GetCombinedPolicy returns a single policy that combines all the given policies
func (sys *IAMSys) GetCombinedPolicy(policies ...string) iampolicy.Policy {
_, policy := sys.store.FilterPolicies(strings.Join(policies, ","), "")
return policy
}
// IsAllowed - checks whether the given policy args are allowed to continue with the REST API call.
func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool {
// If an external authorization (OPA) plugin is configured, always use it.
if authz := newGlobalAuthZPluginFn(); authz != nil {
ok, err := authz.IsAllowed(args)
if err != nil {
logger.LogIf(GlobalContext, err)
}
return ok
}
// Policies don't apply to the owner.
if args.IsOwner {
return true
}
// If the credential is temporary, perform STS related checks.
ok, parentUser, err := sys.IsTempUser(args.AccountName)
if err != nil {
return false
}
if ok {
return sys.IsAllowedSTS(args, parentUser)
}
// If the credential is for a service account, perform the related checks.
ok, parentUser, err = sys.IsServiceAccount(args.AccountName)
if err != nil {
return false
}
if ok {
return sys.IsAllowedServiceAccount(args, parentUser)
}
// Continue with the assumption of a regular user
policies, err := sys.PolicyDBGet(args.AccountName, false, args.Groups...)
if err != nil {
return false
}
if len(policies) == 0 {
// No policy found.
return false
}
// Policies were found, evaluate all of them.
return sys.GetCombinedPolicy(policies...).IsAllowed(args)
}
// SetUsersSysType - sets the users system type, regular or LDAP.
func (sys *IAMSys) SetUsersSysType(t UsersSysType) {
sys.usersSysType = t
}
// GetUsersSysType - returns the users system type for this IAM
func (sys *IAMSys) GetUsersSysType() UsersSysType {
return sys.usersSysType
}
// NewIAMSys - creates new config system object.
func NewIAMSys() *IAMSys {
return &IAMSys{
usersSysType: MinIOUsersSysType,
configLoaded: make(chan struct{}),
}
}
<file_sep>#!/usr/bin/env bash
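# Basic bucket-replication smoke test: bring up two MinIO deployments
# (dc1 on :9001, dc2 on :9002), enable versioning and configure bucket
# replication from dc1 to dc2, then verify that object uploads and
# version deletes are mirrored on the target by diffing the listings.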
if [ -n "$TEST_DEBUG" ]; then
set -x
fi
trap 'catch $LINENO' ERR
# shellcheck disable=SC2120
catch() {
if [ $# -ne 0 ]; then
echo "error on line $1"
echo "dc1 server logs ========="
cat /tmp/dc1.log
echo "dc2 server logs ========="
cat /tmp/dc2.log
fi
echo "Cleaning up instances of MinIO"
set +e
pkill minio
pkill mc
rm -rf /tmp/xl/
}
catch
set -e
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:<KEY>
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
mkdir -p /tmp/xl/1/ /tmp/xl/2/
export MINIO_KMS_SECRET_KEY="my-minio-key:<KEY>
export MINIO_ROOT_USER="minioadmin"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
./minio server --address ":9001" /tmp/xl/1/{1...4}/ 2>&1 >/tmp/dc1.log &
./minio server --address ":9002" /tmp/xl/2/{1...4}/ 2>&1 >/tmp/dc2.log &
sleep 3
export MC_HOST_myminio1=http://minioadmin:minioadmin@localhost:9001
export MC_HOST_myminio2=http://minioadmin:minioadmin@localhost:9002
./mc mb myminio1/testbucket/
./mc version enable myminio1/testbucket/
./mc mb myminio2/testbucket/
./mc version enable myminio2/testbucket/
./mc replicate add myminio1/testbucket --remote-bucket http://minioadmin:minioadmin@localhost:9002/testbucket/ --priority 1
./mc cp README.md myminio1/testbucket/dir/file
./mc cp README.md myminio1/testbucket/dir/file
sleep 1s
echo "=== myminio1"
./mc ls --versions myminio1/testbucket/dir/file
echo "=== myminio2"
./mc ls --versions myminio2/testbucket/dir/file
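# Pick one versionId from the source listing and delete that specific
# version via the S3 API; the removal should then replicate to the target.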
versionId="$(mc ls --json --versions myminio1/testbucket/dir/ | tail -n1 | jq -r .versionId)"
aws s3api --endpoint-url http://localhost:9001 --profile minio delete-object --bucket testbucket --key dir/file --version-id "$versionId"
./mc ls -r --versions myminio1/testbucket >/tmp/myminio1.txt
./mc ls -r --versions myminio2/testbucket >/tmp/myminio2.txt
out=$(diff -qpruN /tmp/myminio1.txt /tmp/myminio2.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after replication: $out"
exit 1
fi
./mc rm myminio1/testbucket/dir/file
sleep 1s
./mc ls -r --versions myminio1/testbucket >/tmp/myminio1.txt
./mc ls -r --versions myminio2/testbucket >/tmp/myminio2.txt
out=$(diff -qpruN /tmp/myminio1.txt /tmp/myminio2.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after replication: $out"
exit 1
fi
echo "Success"
catch
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package amztime implements AWS specific time parsing and deviations
package amztime
import (
"errors"
"testing"
"time"
)
func TestParse(t *testing.T) {
type testCase struct {
expectedErr error
expectedTime time.Time
timeStr string
}
testCases := []testCase{
{
ErrMalformedDate,
time.Time{},
"Tue Sep 6 07:10:23 PM PDT 2022",
},
{
nil,
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
"Tue, 10 Nov 2009 23:00:00 UTC",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.timeStr, func(t *testing.T) {
gott, goterr := Parse(testCase.timeStr)
if !errors.Is(goterr, testCase.expectedErr) {
t.Errorf("expected %v, got %v", testCase.expectedErr, goterr)
}
if !gott.Equal(testCase.expectedTime) {
t.Errorf("expected %v, got %v", testCase.expectedTime, gott)
}
})
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"crypto/subtle"
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/logger"
ftp "goftp.io/server/v2"
)
var _ ftp.Driver = &ftpDriver{}
// ftpDriver implements ftp.Driver to store files in MinIO
type ftpDriver struct {
endpoint string
}
// NewFTPDriver returns an ftp.Driver implementation backed by MinIO
func NewFTPDriver() ftp.Driver {
return &ftpDriver{endpoint: fmt.Sprintf("127.0.0.1:%s", globalMinioPort)}
}
func buildMinioPath(p string) string {
return strings.TrimPrefix(p, SlashSeparator)
}
func buildMinioDir(p string) string {
v := buildMinioPath(p)
if !strings.HasSuffix(v, SlashSeparator) {
return v + SlashSeparator
}
return v
}
type minioFileInfo struct {
p string
info minio.ObjectInfo
isDir bool
}
func (m *minioFileInfo) Name() string {
return m.p
}
func (m *minioFileInfo) Size() int64 {
return m.info.Size
}
func (m *minioFileInfo) Mode() os.FileMode {
if m.isDir {
return os.ModeDir
}
return os.ModePerm
}
func (m *minioFileInfo) ModTime() time.Time {
return m.info.LastModified
}
func (m *minioFileInfo) IsDir() bool {
return m.isDir
}
func (m *minioFileInfo) Sys() interface{} {
return nil
}
//msgp:ignore ftpMetrics
type ftpMetrics struct{}
var globalFtpMetrics ftpMetrics
func ftpTrace(s *ftp.Context, startTime time.Time, source, path string, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
return madmin.TraceInfo{
TraceType: madmin.TraceFTP,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: fmt.Sprintf("ftp USER=%s COMMAND=%s PARAM=%s ISLOGIN=%t, Source=%s", s.Sess.LoginUser(), s.Cmd, s.Param, s.Sess.IsLogin(), source),
Duration: time.Since(startTime),
Path: path,
Error: errStr,
}
}
func (m *ftpMetrics) log(s *ftp.Context, paths ...string) func(err error) {
startTime := time.Now()
source := getSource(2)
return func(err error) {
globalTrace.Publish(ftpTrace(s, startTime, source, strings.Join(paths, " "), err))
}
}
// Stat implements ftpDriver
func (driver *ftpDriver) Stat(ctx *ftp.Context, path string) (fi os.FileInfo, err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
if path == SlashSeparator {
return &minioFileInfo{
p: SlashSeparator,
isDir: true,
}, nil
}
bucket, object := path2BucketObject(path)
if bucket == "" {
return nil, errors.New("bucket name cannot be empty")
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return nil, err
}
if object == "" {
ok, err := clnt.BucketExists(context.Background(), bucket)
if err != nil {
return nil, err
}
if !ok {
return nil, os.ErrNotExist
}
return &minioFileInfo{
p: pathClean(bucket),
info: minio.ObjectInfo{Key: bucket},
isDir: true,
}, nil
}
objInfo, err := clnt.StatObject(context.Background(), bucket, object, minio.StatObjectOptions{})
if err != nil {
if minio.ToErrorResponse(err).Code == "NoSuchKey" {
// dummy return to satisfy LIST (stat -> list) behavior.
return &minioFileInfo{
p: pathClean(object),
info: minio.ObjectInfo{Key: object},
isDir: true,
}, nil
}
return nil, err
}
isDir := strings.HasSuffix(objInfo.Key, SlashSeparator)
return &minioFileInfo{
p: pathClean(object),
info: objInfo,
isDir: isDir,
}, nil
}
// ListDir implements ftpDriver
func (driver *ftpDriver) ListDir(ctx *ftp.Context, path string, callback func(os.FileInfo) error) (err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return err
}
cctx, cancel := context.WithCancel(context.Background())
defer cancel()
bucket, prefix := path2BucketObject(path)
if bucket == "" {
buckets, err := clnt.ListBuckets(cctx)
if err != nil {
return err
}
for _, bucket := range buckets {
info := minioFileInfo{
p: pathClean(bucket.Name),
info: minio.ObjectInfo{Key: retainSlash(bucket.Name), LastModified: bucket.CreationDate},
isDir: true,
}
if err := callback(&info); err != nil {
return err
}
}
return nil
}
prefix = retainSlash(prefix)
for object := range clnt.ListObjects(cctx, bucket, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: false,
}) {
if object.Err != nil {
return object.Err
}
if object.Key == prefix {
continue
}
isDir := strings.HasSuffix(object.Key, SlashSeparator)
info := minioFileInfo{
p: pathClean(strings.TrimPrefix(object.Key, prefix)),
info: object,
isDir: isDir,
}
if err := callback(&info); err != nil {
return err
}
}
return nil
}
func (driver *ftpDriver) CheckPasswd(c *ftp.Context, username, password string) (ok bool, err error) {
stopFn := globalFtpMetrics.log(c, username)
defer stopFn(err)
if globalIAMSys.LDAPConfig.Enabled() {
ldapUserDN, groupDistNames, err := globalIAMSys.LDAPConfig.Bind(username, password)
if err != nil {
return false, err
}
ldapPolicies, _ := globalIAMSys.PolicyDBGet(ldapUserDN, false, groupDistNames...)
if len(ldapPolicies) == 0 {
// no policy associated reject it.
return false, nil
}
return true, nil
}
ui, ok := globalIAMSys.GetUser(context.Background(), username)
if !ok {
return false, nil
}
return subtle.ConstantTimeCompare([]byte(ui.Credentials.SecretKey), []byte(password)) == 1, nil
}
func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) {
ui, ok := globalIAMSys.GetUser(context.Background(), ctx.Sess.LoginUser())
if !ok && !globalIAMSys.LDAPConfig.Enabled() {
return nil, errNoSuchUser
}
if !ok && globalIAMSys.LDAPConfig.Enabled() {
targetUser, targetGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(ctx.Sess.LoginUser())
if err != nil {
return nil, err
}
ldapPolicies, _ := globalIAMSys.PolicyDBGet(targetUser, false, targetGroups...)
if len(ldapPolicies) == 0 {
return nil, errAuthentication
}
expiryDur, err := globalIAMSys.LDAPConfig.GetExpiryDuration("")
if err != nil {
return nil, err
}
claims := make(map[string]interface{})
claims[expClaim] = UTCNow().Add(expiryDur).Unix()
claims[ldapUser] = targetUser
claims[ldapUserN] = ctx.Sess.LoginUser()
cred, err := auth.GetNewCredentialsWithMetadata(claims, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
// Set the parent of the temporary access key; this is useful
// when obtaining service accounts with this cred.
cred.ParentUser = targetUser
// Set this value to the LDAP groups; an LDAP user can be part
// of a large number of groups.
cred.Groups = targetGroups
// Set the newly generated credentials; policyName is empty on purpose,
// since LDAP policies are applied automatically using the ldapUser/ldapGroups
// mapping.
updatedAt, err := globalIAMSys.SetTempUser(context.Background(), cred.AccessKey, cred, "")
if err != nil {
return nil, err
}
// Call hook for site replication.
logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey,
SecretKey: cred.SecretKey,
SessionToken: cred.SessionToken,
ParentUser: cred.ParentUser,
},
UpdatedAt: updatedAt,
}))
return minio.New(driver.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: globalIsTLS,
Transport: globalRemoteTargetTransport,
})
}
// ok == true - at this point
if ui.Credentials.IsTemp() {
// Temporary credentials are not allowed.
return nil, errAuthentication
}
return minio.New(driver.endpoint, &minio.Options{
Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""),
Secure: globalIsTLS,
Transport: globalRemoteTargetTransport,
})
}
// DeleteDir implements ftpDriver
func (driver *ftpDriver) DeleteDir(ctx *ftp.Context, path string) (err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
bucket, prefix := path2BucketObject(path)
if bucket == "" {
return errors.New("deleting all buckets not allowed")
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return err
}
cctx, cancel := context.WithCancel(context.Background())
defer cancel()
objectsCh := make(chan minio.ObjectInfo)
// Send object names that are needed to be removed to objectsCh
go func() {
defer close(objectsCh)
opts := minio.ListObjectsOptions{Prefix: prefix, Recursive: true}
for object := range clnt.ListObjects(cctx, bucket, opts) {
if object.Err != nil {
return
}
objectsCh <- object
}
}()
// Call RemoveObjects API
for err := range clnt.RemoveObjects(context.Background(), bucket, objectsCh, minio.RemoveObjectsOptions{}) {
if err.Err != nil {
return err.Err
}
}
return nil
}
// DeleteFile implements ftpDriver
func (driver *ftpDriver) DeleteFile(ctx *ftp.Context, path string) (err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
bucket, object := path2BucketObject(path)
if bucket == "" {
return errors.New("bucket name cannot be empty")
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return err
}
return clnt.RemoveObject(context.Background(), bucket, object, minio.RemoveObjectOptions{})
}
// Rename implements ftpDriver
func (driver *ftpDriver) Rename(ctx *ftp.Context, fromPath string, toPath string) (err error) {
stopFn := globalFtpMetrics.log(ctx, fromPath, toPath)
defer stopFn(err)
return NotImplemented{}
}
// MakeDir implements ftpDriver
func (driver *ftpDriver) MakeDir(ctx *ftp.Context, path string) (err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
bucket, prefix := path2BucketObject(path)
if bucket == "" {
return errors.New("bucket name cannot be empty")
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return err
}
dirPath := buildMinioDir(prefix)
_, err = clnt.PutObject(context.Background(), bucket, dirPath, bytes.NewReader([]byte("")), 0,
// Always send Content-MD5 to succeed with buckets that have
// object locking enabled. There is no performance hit since
// this is always an empty object.
minio.PutObjectOptions{SendContentMd5: true},
)
return err
}
// GetFile implements ftpDriver
func (driver *ftpDriver) GetFile(ctx *ftp.Context, path string, offset int64) (n int64, rc io.ReadCloser, err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
bucket, object := path2BucketObject(path)
if bucket == "" {
return 0, nil, errors.New("bucket name cannot be empty")
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return 0, nil, err
}
opts := minio.GetObjectOptions{}
obj, err := clnt.GetObject(context.Background(), bucket, object, opts)
if err != nil {
return 0, nil, err
}
defer func() {
if err != nil && obj != nil {
obj.Close()
}
}()
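// Seek to the requested offset (used for resumed transfers) before
// handing the stream back to the FTP layer.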
_, err = obj.Seek(offset, io.SeekStart)
if err != nil {
return 0, nil, err
}
info, err := obj.Stat()
if err != nil {
return 0, nil, err
}
return info.Size - offset, obj, nil
}
// PutFile implements ftpDriver
func (driver *ftpDriver) PutFile(ctx *ftp.Context, path string, data io.Reader, offset int64) (n int64, err error) {
stopFn := globalFtpMetrics.log(ctx, path)
defer stopFn(err)
bucket, object := path2BucketObject(path)
if bucket == "" {
return 0, errors.New("bucket name cannot be empty")
}
if offset != -1 {
// FTP - APPEND not implemented
return 0, NotImplemented{}
}
clnt, err := driver.getMinIOClient(ctx)
if err != nil {
return 0, err
}
info, err := clnt.PutObject(context.Background(), bucket, object, data, -1, minio.PutObjectOptions{
ContentType: "application/octet-stream",
SendContentMd5: true,
})
return info.Size, err
}
<file_sep>#!/bin/bash
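# Pool decommission test: start a MinIO deployment with one erasure pool,
# load IAM users/policies, versioned data and a warm tier, expand the
# cluster with a second pool, decommission the original pool, and verify
# that users, policies, versions and tiered objects survive intact.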
if [ -n "$TEST_DEBUG" ]; then
set -x
fi
pkill minio
rm -rf /tmp/xl
rm -rf /tmp/xltier
if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
export CI=true
(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
pid=$!
sleep 2
export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
./mc admin user add myminio/ minio123 minio123
./mc admin user add myminio/ minio12345 minio12345
./mc admin policy create myminio/ rw ./docs/distributed/rw.json
./mc admin policy create myminio/ lake ./docs/distributed/rw.json
./mc admin policy attach myminio/ rw --user=minio123
./mc admin policy attach myminio/ lake,rw --user=minio12345
./mc mb -l myminio/versioned
./mc mirror internal myminio/versioned/ --quiet >/dev/null
## Soft delete (creates delete markers)
./mc rm -r --force myminio/versioned >/dev/null
## mirror again to create another set of version on top
./mc mirror internal myminio/versioned/ --quiet >/dev/null
expected_checksum=$(./mc cat internal/dsync/drwmutex.go | md5sum)
user_count=$(./mc admin user list myminio/ | wc -l)
policy_count=$(./mc admin policy list myminio/ | wc -l)
## create a warm tier instance
(minio server /tmp/xltier/{1...4}/disk{0...1} --address :9001 2>&1 >/dev/null) &
sleep 2
export MC_HOST_mytier="http://minioadmin:minioadmin@localhost:9001/"
./mc mb -l myminio/bucket2
./mc mb -l mytier/tiered
## create a tier and set up ilm policy to tier immediately
./mc admin tier add minio myminio TIER1 --endpoint http://localhost:9001 --access-key minioadmin --secret-key minioadmin --bucket tiered --prefix prefix5/
./mc ilm add myminio/bucket2 --transition-days 0 --transition-tier TIER1
## mirror some content to bucket2 and capture versions tiered
./mc mirror internal myminio/bucket2/ --quiet >/dev/null
./mc ls -r myminio/bucket2/ >bucket2_ns.txt
./mc ls -r --versions myminio/bucket2/ >bucket2_ns_versions.txt
sleep 2
./mc ls -r --versions mytier/tiered/ >tiered_ns_versions.txt
kill $pid
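# Expand the cluster: restart the server with the original pool plus a
# second pool (/tmp/xl/{11...30}/disk{0...3}) and verify IAM users and
# policies survived the expansion.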
(minio server /tmp/xl/{1...10}/disk{0...1} /tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded.log) &
pid=$!
sleep 2
expanded_user_count=$(./mc admin user list myminio/ | wc -l)
expanded_policy_count=$(./mc admin policy list myminio/ | wc -l)
if [ $user_count -ne $expanded_user_count ]; then
echo "BUG: original user count differs from expanded setup"
exit 1
fi
if [ $policy_count -ne $expanded_policy_count ]; then
echo "BUG: original policy count differs from expanded setup"
exit 1
fi
./mc version info myminio/versioned | grep -q "versioning is enabled"
ret=$?
if [ $ret -ne 0 ]; then
echo "expected versioning enabled after expansion"
exit 1
fi
./mc mirror cmd myminio/versioned/ --quiet >/dev/null
./mc ls -r myminio/versioned/ >expanded_ns.txt
./mc ls -r --versions myminio/versioned/ >expanded_ns_versions.txt
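# Decommission the original pool and wait for the drain to complete.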
./mc admin decom start myminio/ /tmp/xl/{1...10}/disk{0...1}
until ./mc admin decom status myminio/ | grep -q Complete; do
echo "waiting for decom to finish..."
sleep 1
done
kill $pid
(minio server /tmp/xl/{11...30}/disk{0...3} 2>&1 >/dev/null) &
pid=$!
sleep 2
decom_user_count=$(./mc admin user list myminio/ | wc -l)
decom_policy_count=$(./mc admin policy list myminio/ | wc -l)
if [ $user_count -ne $decom_user_count ]; then
echo "BUG: original user count differs after decommission"
exit 1
fi
if [ $policy_count -ne $decom_policy_count ]; then
echo "BUG: original policy count differs after decommission"
exit 1
fi
./mc version info myminio/versioned | grep -q "versioning is enabled"
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected versioning enabled after decommission"
exit 1
fi
./mc ls -r myminio/versioned >decommissioned_ns.txt
./mc ls -r --versions myminio/versioned >decommissioned_ns_versions.txt
out=$(diff -qpruN expanded_ns.txt decommissioned_ns.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after decommission: $out"
exit 1
fi
out=$(diff -qpruN expanded_ns_versions.txt decommissioned_ns_versions.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after decommission: $out"
exit 1
fi
got_checksum=$(./mc cat myminio/versioned/dsync/drwmutex.go | md5sum)
if [ "${expected_checksum}" != "${got_checksum}" ]; then
echo "BUG: decommission failed on encrypted objects: expected ${expected_checksum} got ${got_checksum}"
exit 1
fi
# after decommissioning, compare listings in bucket2 and tiered
./mc version info myminio/bucket2 | grep -q "versioning is enabled"
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected versioning enabled after decommission on bucket2"
exit 1
fi
./mc ls -r myminio/bucket2 >decommissioned_bucket2_ns.txt
./mc ls -r --versions myminio/bucket2 >decommissioned_bucket2_ns_versions.txt
./mc ls -r --versions mytier/tiered/ >tiered_ns_versions2.txt
out=$(diff -qpruN bucket2_ns.txt decommissioned_bucket2_ns.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after decommission in bucket2: $out"
exit 1
fi
out=$(diff -qpruN bucket2_ns_versions.txt decommissioned_bucket2_ns_versions.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after decommission in bucket2x: $out"
exit 1
fi
out=$(diff -qpruN tiered_ns_versions.txt tiered_ns_versions2.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after decommission in warm tier: $out"
exit 1
fi
got_checksum=$(./mc cat myminio/bucket2/dsync/drwmutex.go | md5sum)
if [ "${expected_checksum}" != "${got_checksum}" ]; then
echo "BUG: decommission failed on encrypted objects with tiering: expected ${expected_checksum} got ${got_checksum}"
exit 1
fi
kill $pid
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"sync"
"github.com/dustin/go-humanize"
)
const (
// Block size used for all internal operations version 1.
// TLDR: not used anymore, since xl.meta captures the right blockSize,
// so blockSizeV2 should be used for all future purposes.
// This value is kept here only to calculate the max API
// requests based on RAM size for existing content.
blockSizeV1 = 10 * humanize.MiByte
// Block size used in erasure coding version 2.
blockSizeV2 = 1 * humanize.MiByte
// Buckets meta prefix.
bucketMetaPrefix = "buckets"
// Deleted Buckets prefix.
deletedBucketsPrefix = ".deleted"
// ETag (hex encoded md5sum) of empty string.
emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)
// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex sync.RWMutex
// Global object layer, only accessed by globalObjectAPI.
var globalObjectAPI ObjectLayer
// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer
// Depending on the disk type network or local, initialize storage API.
func newStorageAPI(endpoint Endpoint, healthCheck bool) (storage StorageAPI, err error) {
if endpoint.IsLocal {
storage, err := newXLStorage(endpoint, healthCheck)
if err != nil {
return nil, err
}
return newXLStorageDiskIDCheck(storage), nil
}
return newStorageRESTClient(endpoint, healthCheck), nil
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"context"
"crypto/sha256"
"flag"
"fmt"
"hash"
"io"
"log"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
var (
sourceEndpoint, sourceAccessKey, sourceSecretKey string
sourceBucket, sourcePrefix string
targetEndpoint, targetAccessKey, targetSecretKey string
targetBucket, targetPrefix string
minimumObjectAge string
debug bool
insecure bool
)
func buildS3Client(endpoint, accessKey, secretKey string, insecure bool) (*minio.Client, error) {
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
secure := strings.EqualFold(u.Scheme, "https")
transport, err := minio.DefaultTransport(secure)
if err != nil {
return nil, err
}
if insecure {
// skip TLS verification
transport.TLSClientConfig.InsecureSkipVerify = true
}
clnt, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: secure,
Transport: transport,
})
if err != nil {
return nil, err
}
return clnt, nil
}
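// main wires up the source and target S3 clients, walks both listings in
// lexical key order, and reports objects present on only one side or whose
// content differs.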
func main() {
flag.StringVar(&sourceEndpoint, "source-endpoint", "https://play.min.io", "S3 endpoint URL")
flag.StringVar(&sourceAccessKey, "source-access-key", "<KEY>", "S3 Access Key")
flag.StringVar(&sourceSecretKey, "source-secret-key", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", "S3 Secret Key")
flag.StringVar(&sourceBucket, "source-bucket", "", "Select a specific bucket")
flag.StringVar(&sourcePrefix, "source-prefix", "", "Select a prefix")
flag.StringVar(&targetEndpoint, "target-endpoint", "https://play.min.io", "S3 endpoint URL")
flag.StringVar(&targetAccessKey, "target-access-key", "<KEY>", "S3 Access Key")
flag.StringVar(&targetSecretKey, "target-secret-key", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", "S3 Secret Key")
flag.StringVar(&targetBucket, "target-bucket", "", "Select a specific bucket")
flag.StringVar(&targetPrefix, "target-prefix", "", "Select a prefix")
flag.StringVar(&minimumObjectAge, "minimum-object-age", "0s", "Ignore objects younger than the specified age")
flag.BoolVar(&debug, "debug", false, "Prints HTTP network calls to S3 endpoint")
flag.BoolVar(&insecure, "insecure", false, "Disable TLS verification")
flag.Parse()
if sourceEndpoint == "" {
log.Fatalln("source Endpoint is not provided")
}
if sourceAccessKey == "" {
log.Fatalln("source Access key is not provided")
}
if sourceSecretKey == "" {
log.Fatalln("source Secret key is not provided")
}
if sourceBucket == "" && sourcePrefix != "" {
log.Fatalln("--source-prefix is specified without --source-bucket.")
}
if targetEndpoint == "" {
log.Fatalln("target Endpoint is not provided")
}
if targetAccessKey == "" {
log.Fatalln("target Access key is not provided")
}
if targetSecretKey == "" {
log.Fatalln("target Secret key is not provided")
}
if targetBucket == "" && targetPrefix != "" {
log.Fatalln("--target-prefix is specified without --target-bucket.")
}
srcU, err := url.Parse(sourceEndpoint)
if err != nil {
log.Fatalln(err)
}
ssecure := strings.EqualFold(srcU.Scheme, "https")
stransport, err := minio.DefaultTransport(ssecure)
if err != nil {
log.Fatalln(err)
}
if insecure {
// skip TLS verification
stransport.TLSClientConfig.InsecureSkipVerify = true
}
ageDelta, err := time.ParseDuration(minimumObjectAge)
if err != nil {
log.Fatalln(err)
}
maxObjectModTime := time.Now().Add(-ageDelta)
// nextObject is used to skip objects in the source & target that are
// newer than the configured minimum object age.
nextObject := func(ch <-chan minio.ObjectInfo) (ctnt minio.ObjectInfo, ok bool) {
for {
ctnt, ok := <-ch
if !ok {
return minio.ObjectInfo{}, false
}
if ctnt.LastModified.Before(maxObjectModTime) {
return ctnt, ok
}
}
}
sclnt, err := buildS3Client(sourceEndpoint, sourceAccessKey, sourceSecretKey, insecure)
if err != nil {
log.Fatalln(err)
}
tclnt, err := buildS3Client(targetEndpoint, targetAccessKey, targetSecretKey, insecure)
if err != nil {
log.Fatalln(err)
}
if debug {
sclnt.TraceOn(os.Stderr)
tclnt.TraceOn(os.Stderr)
}
sopts := minio.ListObjectsOptions{
Recursive: true,
Prefix: sourcePrefix,
}
topts := minio.ListObjectsOptions{
Recursive: true,
Prefix: targetPrefix,
}
srcCh := sclnt.ListObjects(context.Background(), sourceBucket, sopts)
tgtCh := tclnt.ListObjects(context.Background(), targetBucket, topts)
srcCtnt, srcOk := nextObject(srcCh)
tgtCtnt, tgtOk := nextObject(tgtCh)
var srcEOF, tgtEOF bool
srcSha256 := sha256.New()
tgtSha256 := sha256.New()
for {
srcSha256.Reset()
tgtSha256.Reset()
srcEOF = !srcOk
tgtEOF = !tgtOk
// No objects from source AND target: Finish
if srcEOF && tgtEOF {
break
}
if !srcEOF && srcCtnt.Err != nil {
log.Fatal(srcCtnt.Err)
}
if !tgtEOF && tgtCtnt.Err != nil {
log.Fatal(tgtCtnt.Err)
}
// If source doesn't have objects anymore, comparison becomes obvious
if srcEOF {
fmt.Printf("only in target: %s\n", tgtCtnt.Key)
tgtCtnt, tgtOk = nextObject(tgtCh)
continue
}
// The same for target
if tgtEOF {
fmt.Printf("only in source: %s\n", srcCtnt.Key)
srcCtnt, srcOk = nextObject(srcCh)
continue
}
if srcCtnt.Key < tgtCtnt.Key {
fmt.Printf("only in source: %s\n", srcCtnt.Key)
srcCtnt, srcOk = nextObject(srcCh)
continue
}
if srcCtnt.Key == tgtCtnt.Key {
if verifyChecksum(sclnt, srcSha256, tgtSha256, srcCtnt, tgtCtnt) {
fmt.Printf("all readable source and target: %s -> %s\n", srcCtnt.Key, tgtCtnt.Key)
}
srcCtnt, srcOk = nextObject(srcCh)
tgtCtnt, tgtOk = nextObject(tgtCh)
continue
}
fmt.Printf("only in target: %s\n", tgtCtnt.Key)
tgtCtnt, tgtOk = nextObject(tgtCh)
}
}
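// verifyChecksum downloads the object from both source and target, compares
// size and content-type, and then compares the SHA-256 sums of the contents,
// reporting any mismatch or read failure.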
func verifyChecksum(sclnt *minio.Client, srcSha256, tgtSha256 hash.Hash, srcCtnt, tgtCtnt minio.ObjectInfo) (allgood bool) {
opts := minio.GetObjectOptions{}
if srcCtnt.Size != tgtCtnt.Size {
fmt.Printf("differ in size sourceSize: %d, targetSize: %d\n", srcCtnt.Size, tgtCtnt.Size)
return false
} else if srcCtnt.ContentType != tgtCtnt.ContentType {
fmt.Printf("differ in contentType source: %s, target: %s\n", srcCtnt.ContentType, tgtCtnt.ContentType)
return false
}
core := minio.Core{Client: sclnt}
sobj, _, _, err := core.GetObject(context.Background(), sourceBucket, srcCtnt.Key, opts)
if err != nil {
fmt.Printf("unreadable on source: %s (%s)\n", srcCtnt.Key, err)
return false
}
tobj, _, _, err := core.GetObject(context.Background(), targetBucket, tgtCtnt.Key, opts)
if err != nil {
fmt.Printf("unreadable on target: %s (%s)\n", tgtCtnt.Key, err)
return false
}
var sourceFailed, targetFailed bool
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
srcSize, err := io.Copy(srcSha256, sobj)
if err != nil {
fmt.Printf("unreadable on source: %s (%s)\n", srcCtnt.Key, err)
sourceFailed = true
} else if srcSize != srcCtnt.Size {
fmt.Printf("unreadable on source - size differs upon read: %s\n", srcCtnt.Key)
sourceFailed = true
}
}()
go func() {
defer wg.Done()
tgtSize, err := io.Copy(tgtSha256, tobj)
if err != nil {
fmt.Printf("unreadable on target: %s (%s)\n", tgtCtnt.Key, err)
targetFailed = true
} else if tgtSize != tgtCtnt.Size {
fmt.Printf("unreadable on target - size differs upon read: %s\n", tgtCtnt.Key)
targetFailed = true
}
}()
wg.Wait()
sobj.Close()
tobj.Close()
if !sourceFailed && !targetFailed {
ssum := srcSha256.Sum(nil)
tsum := tgtSha256.Sum(nil)
allgood = bytes.Equal(ssum, tsum)
if !allgood {
fmt.Printf("sha256 sum mismatch: %s -> Expected(%x), Found(%x)\n", srcCtnt.Key, ssum, tsum)
}
}
return allgood
}
<file_sep>#!/usr/bin/env bash
set -x
trap 'catch $LINENO' ERR
# shellcheck disable=SC2120
catch() {
if [ $# -ne 0 ]; then
echo "error on line $1"
for site in sitea siteb; do
echo "$site server logs ========="
cat "/tmp/${site}_1.log"
echo "==========================="
cat "/tmp/${site}_2.log"
done
fi
echo "Cleaning up instances of MinIO"
pkill minio
pkill -9 minio
rm -rf /tmp/multisitea
rm -rf /tmp/multisiteb
rm -rf /tmp/data
}
catch
set -e
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:<KEY>
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 10s
export MC_HOST_sitea=http://minio:[email protected]:9001
export MC_HOST_siteb=http://minio:[email protected]:9004
./mc mb sitea/bucket
## Create 10 files
mkdir -p /tmp/data
for i in $(seq 1 10); do
echo "T" >/tmp/data/file_${i}.txt
done
./mc mirror /tmp/data sitea/bucket/
./mc version enable sitea/bucket
./mc cp /tmp/data/file_1.txt sitea/bucket/marker
./mc rm sitea/bucket/marker
./mc mb siteb/bucket/
./mc version enable siteb/bucket/
echo "adding replication rule for site a -> site b"
./mc replicate add sitea/bucket/ \
--remote-bucket http://minio:[email protected]:9004/bucket
remote_arn=$(./mc replicate ls sitea/bucket --json | jq -r .rule.Destination.Bucket)
sleep 1
./mc replicate resync start sitea/bucket/ --remote-bucket "${remote_arn}"
sleep 20s ## sleep for 20s; the idea is to allow roughly 200ms per object.
count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)
./mc ls -r --versions sitea/bucket >/tmp/sitea.txt
./mc ls -r --versions siteb/bucket >/tmp/siteb.txt
out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no missing entries after replication: $out"
exit 1
fi
if [ $count -ne 12 ]; then
echo "resync not complete after 10s unexpected failure"
./mc diff sitea/bucket siteb/bucket
exit 1
fi
./mc cp /tmp/data/file_1.txt sitea/bucket/marker_new
./mc rm sitea/bucket/marker_new
sleep 12s ## sleep for 12s; the idea is to allow roughly 100ms per object.
./mc ls -r --versions sitea/bucket >/tmp/sitea.txt
./mc ls -r --versions siteb/bucket >/tmp/siteb.txt
out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
./mc rm -r --force --versions sitea/bucket/marker
sleep 14s ## sleep for 14s; the idea is to allow roughly 100ms per object.
./mc ls -r --versions sitea/bucket >/tmp/sitea.txt
./mc ls -r --versions siteb/bucket >/tmp/siteb.txt
out=$(diff -qpruN /tmp/sitea.txt /tmp/siteb.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
./mc mb sitea/bucket-version/
./mc mb siteb/bucket-version
./mc version enable sitea/bucket-version/
./mc version enable siteb/bucket-version/
echo "adding replication rule for site a -> site b"
./mc replicate add sitea/bucket-version/ \
--remote-bucket http://minio:[email protected]:9004/bucket-version
./mc mb sitea/bucket-version/directory/
sleep 2s
./mc ls -r --versions sitea/bucket-version/ >/tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ >/tmp/siteb_dirs.txt
out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
./mc rm -r --versions --force sitea/bucket-version/
sleep 2s
./mc ls -r --versions sitea/bucket-version/ >/tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ >/tmp/siteb_dirs.txt
out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
## check that we do not create delete markers on directory objects; deletion is always permanent.
./mc mb sitea/bucket-version/directory/
sleep 2s
./mc rm -r --force sitea/bucket-version/
sleep 2s
./mc ls -r --versions sitea/bucket-version/ >/tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ >/tmp/siteb_dirs.txt
out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
sitea_count=$(cat /tmp/sitea_dirs.txt | wc -l) # need to do it this way to avoid filename in the output
siteb_count=$(cat /tmp/siteb_dirs.txt | wc -l) # need to do it this way to avoid filename in the output
sitea_out=$(cat /tmp/sitea_dirs.txt)
siteb_out=$(cat /tmp/siteb_dirs.txt)
if [ $sitea_count -ne 0 ]; then
echo "BUG: expected no 'directory objects' left after deletion: ${sitea_out}"
exit 1
fi
if [ $siteb_count -ne 0 ]; then
echo "BUG: expected no 'directory objects' left after deletion: ${siteb_out}"
exit 1
fi
catch
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package hash
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"net/http"
"strings"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
// MinIOMultipartChecksum is used as metadata on multipart uploads to indicate the checksum type.
const MinIOMultipartChecksum = "x-minio-multipart-checksum"
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
const (
// ChecksumTrailing indicates the checksum will be sent in the trailing header.
// Another checksum type will be set.
ChecksumTrailing ChecksumType = 1 << iota
// ChecksumSHA256 indicates a SHA256 checksum.
ChecksumSHA256
// ChecksumSHA1 indicates a SHA-1 checksum.
ChecksumSHA1
// ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
ChecksumCRC32
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
ChecksumCRC32C
// ChecksumInvalid indicates an invalid checksum.
ChecksumInvalid
// ChecksumMultipart indicates the checksum is from a multipart upload.
ChecksumMultipart
// ChecksumIncludesMultipart indicates the checksum also contains part checksums.
ChecksumIncludesMultipart
// ChecksumNone indicates no checksum.
ChecksumNone ChecksumType = 0
)
// Checksum is a type and base 64 encoded value.
type Checksum struct {
Type ChecksumType
Encoded string
Raw []byte
}
// Is returns if c is all of t.
func (c ChecksumType) Is(t ChecksumType) bool {
if t == ChecksumNone {
return c == ChecksumNone
}
return c&t == t
}
// Key returns the header key.
// returns empty string if invalid or none.
func (c ChecksumType) Key() string {
switch {
case c.Is(ChecksumCRC32):
return xhttp.AmzChecksumCRC32
case c.Is(ChecksumCRC32C):
return xhttp.AmzChecksumCRC32C
case c.Is(ChecksumSHA1):
return xhttp.AmzChecksumSHA1
case c.Is(ChecksumSHA256):
return xhttp.AmzChecksumSHA256
}
return ""
}
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch {
case c.Is(ChecksumCRC32):
return 4
case c.Is(ChecksumCRC32C):
return 4
case c.Is(ChecksumSHA1):
return sha1.Size
case c.Is(ChecksumSHA256):
return sha256.Size
}
return 0
}
// IsSet returns whether the type is valid and known.
func (c ChecksumType) IsSet() bool {
return !c.Is(ChecksumInvalid) && !c.Is(ChecksumNone)
}
// NewChecksumType returns a checksum type based on the algorithm string.
func NewChecksumType(alg string) ChecksumType {
switch strings.ToUpper(alg) {
case "CRC32":
return ChecksumCRC32
case "CRC32C":
return ChecksumCRC32C
case "SHA1":
return ChecksumSHA1
case "SHA256":
return ChecksumSHA256
case "":
return ChecksumNone
}
return ChecksumInvalid
}
// String returns the type as a string.
func (c ChecksumType) String() string {
switch {
case c.Is(ChecksumCRC32):
return "CRC32"
case c.Is(ChecksumCRC32C):
return "CRC32C"
case c.Is(ChecksumSHA1):
return "SHA1"
case c.Is(ChecksumSHA256):
return "SHA256"
case c.Is(ChecksumNone):
return ""
}
return "invalid"
}
// Hasher returns a hasher corresponding to the checksum type.
// Returns nil if no checksum.
func (c ChecksumType) Hasher() hash.Hash {
switch {
case c.Is(ChecksumCRC32):
return crc32.NewIEEE()
case c.Is(ChecksumCRC32C):
return crc32.New(crc32.MakeTable(crc32.Castagnoli))
case c.Is(ChecksumSHA1):
return sha1.New()
case c.Is(ChecksumSHA256):
return sha256.New()
}
return nil
}
// Trailing returns whether the checksum is trailing.
func (c ChecksumType) Trailing() bool {
return c.Is(ChecksumTrailing)
}
// NewChecksumFromData returns a new checksum from specified algorithm and base64 encoded value.
func NewChecksumFromData(t ChecksumType, data []byte) *Checksum {
if !t.IsSet() {
return nil
}
h := t.Hasher()
h.Write(data)
raw := h.Sum(nil)
c := Checksum{Type: t, Encoded: base64.StdEncoding.EncodeToString(raw), Raw: raw}
if !c.Valid() {
return nil
}
return &c
}
// ReadCheckSums will read checksums from b and return them.
func ReadCheckSums(b []byte, part int) map[string]string {
res := make(map[string]string, 1)
for len(b) > 0 {
t, n := binary.Uvarint(b)
if n < 0 {
break
}
b = b[n:]
typ := ChecksumType(t)
length := typ.RawByteLen()
if length == 0 || len(b) < length {
break
}
cs := base64.StdEncoding.EncodeToString(b[:length])
b = b[length:]
if typ.Is(ChecksumMultipart) {
t, n := binary.Uvarint(b)
if n < 0 {
break
}
cs = fmt.Sprintf("%s-%d", cs, t)
b = b[n:]
if part > 0 {
cs = ""
}
if typ.Is(ChecksumIncludesMultipart) {
wantLen := int(t) * length
if len(b) < wantLen {
break
}
// Read part checksum
if part > 0 && uint64(part) <= t {
offset := (part - 1) * length
partCs := b[offset:]
cs = base64.StdEncoding.EncodeToString(partCs[:length])
}
b = b[wantLen:]
}
} else if part > 1 {
// For non-multipart, checksum is part 1.
cs = ""
}
if cs != "" {
res[typ.String()] = cs
}
}
if len(res) == 0 {
res = nil
}
return res
}
// NewChecksumWithType is similar to NewChecksumString but expects the algorithm as a ChecksumType.
func NewChecksumWithType(alg ChecksumType, value string) *Checksum {
if !alg.IsSet() {
return nil
}
bvalue, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return nil
}
c := Checksum{Type: alg, Encoded: value, Raw: bvalue}
if !c.Valid() {
return nil
}
return &c
}
// NewChecksumString returns a new checksum from specified algorithm and base64 encoded value.
func NewChecksumString(alg, value string) *Checksum {
return NewChecksumWithType(NewChecksumType(alg), value)
}
// AppendTo will append the checksum to b.
// 'parts' is used when checksum has ChecksumMultipart set.
// ReadCheckSums reads the values back.
func (c *Checksum) AppendTo(b []byte, parts []byte) []byte {
if c == nil {
return nil
}
var tmp [binary.MaxVarintLen32]byte
n := binary.PutUvarint(tmp[:], uint64(c.Type))
crc := c.Raw
if len(crc) != c.Type.RawByteLen() {
return b
}
b = append(b, tmp[:n]...)
b = append(b, crc...)
if c.Type.Is(ChecksumMultipart) {
var checksums int
// Ensure we don't divide by 0:
if c.Type.RawByteLen() == 0 || len(parts)%c.Type.RawByteLen() != 0 {
logger.LogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen()))
checksums = 0
parts = nil
} else {
checksums = len(parts) / c.Type.RawByteLen()
}
if !c.Type.Is(ChecksumIncludesMultipart) {
parts = nil
}
n := binary.PutUvarint(tmp[:], uint64(checksums))
b = append(b, tmp[:n]...)
if len(parts) > 0 {
b = append(b, parts...)
}
}
return b
}
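// For reference, a sketch of the serialized layout produced by AppendTo and
// consumed by ReadCheckSums (as implemented above):
//
//   uvarint(type) | raw checksum (RawByteLen bytes) |
//   [ uvarint(number of part checksums) | concatenated raw part checksums ]
//
// where the bracketed portion is present only when ChecksumMultipart is set,
// and the part checksums only when ChecksumIncludesMultipart is also set.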
// Valid returns whether checksum is valid.
func (c Checksum) Valid() bool {
if c.Type == ChecksumInvalid {
return false
}
if len(c.Encoded) == 0 || c.Type.Trailing() {
return c.Type.Is(ChecksumNone) || c.Type.Trailing()
}
raw := c.Raw
return c.Type.RawByteLen() == len(raw)
}
// Matches returns whether given content matches c.
func (c Checksum) Matches(content []byte) error {
if len(c.Encoded) == 0 {
return nil
}
hasher := c.Type.Hasher()
_, err := hasher.Write(content)
if err != nil {
return err
}
sum := hasher.Sum(nil)
if !bytes.Equal(sum, c.Raw) {
return ChecksumMismatch{
Want: c.Encoded,
Got: base64.StdEncoding.EncodeToString(sum),
}
}
return nil
}
// AsMap returns the checksum as a map, keyed by the checksum type with the encoded value.
func (c *Checksum) AsMap() map[string]string {
if c == nil || !c.Valid() {
return nil
}
return map[string]string{c.Type.String(): c.Encoded}
}
// TransferChecksumHeader will transfer any checksum value that has been checked.
// If checksum was trailing, they must have been added to r.Trailer.
func TransferChecksumHeader(w http.ResponseWriter, r *http.Request) {
c, err := GetContentChecksum(r.Header)
if err != nil || c == nil {
return
}
t, s := c.Type, c.Encoded
if !c.Type.IsSet() {
return
}
if c.Type.Is(ChecksumTrailing) {
val := r.Trailer.Get(t.Key())
if val != "" {
w.Header().Set(t.Key(), val)
}
return
}
w.Header().Set(t.Key(), s)
}
// AddChecksumHeader will transfer any checksum value that has been checked.
func AddChecksumHeader(w http.ResponseWriter, c map[string]string) {
for k, v := range c {
cksum := NewChecksumString(k, v)
if cksum == nil {
continue
}
if cksum.Valid() {
w.Header().Set(cksum.Type.Key(), v)
}
}
}
// GetContentChecksum returns content checksum.
// Returns ErrInvalidChecksum if the checksum headers are invalid or conflicting.
// Returns nil, nil if no checksum.
func GetContentChecksum(h http.Header) (*Checksum, error) {
if trailing := h.Values(xhttp.AmzTrailer); len(trailing) > 0 {
var res *Checksum
for _, header := range trailing {
var duplicates bool
switch {
case strings.EqualFold(header, ChecksumCRC32C.Key()):
duplicates = res != nil
res = NewChecksumWithType(ChecksumCRC32C|ChecksumTrailing, "")
case strings.EqualFold(header, ChecksumCRC32.Key()):
duplicates = res != nil
res = NewChecksumWithType(ChecksumCRC32|ChecksumTrailing, "")
case strings.EqualFold(header, ChecksumSHA256.Key()):
duplicates = res != nil
res = NewChecksumWithType(ChecksumSHA256|ChecksumTrailing, "")
case strings.EqualFold(header, ChecksumSHA1.Key()):
duplicates = res != nil
res = NewChecksumWithType(ChecksumSHA1|ChecksumTrailing, "")
}
if duplicates {
return nil, ErrInvalidChecksum
}
}
if res != nil {
return res, nil
}
}
t, s := getContentChecksum(h)
if t == ChecksumNone {
if s == "" {
return nil, nil
}
return nil, ErrInvalidChecksum
}
cksum := NewChecksumWithType(t, s)
if cksum == nil {
return nil, ErrInvalidChecksum
}
return cksum, nil
}
// getContentChecksum returns content checksum type and value.
// Returns ChecksumInvalid if more than one checksum header is present.
func getContentChecksum(h http.Header) (t ChecksumType, s string) {
t = ChecksumNone
alg := h.Get(xhttp.AmzChecksumAlgo)
if alg != "" {
t |= NewChecksumType(alg)
if t.IsSet() {
hdr := t.Key()
if s = h.Get(hdr); s == "" {
return ChecksumNone, ""
}
}
return t, s
}
checkType := func(c ChecksumType) {
if got := h.Get(c.Key()); got != "" {
// If already set, invalid
if t != ChecksumNone {
t = ChecksumInvalid
s = ""
} else {
t = c
s = got
}
return
}
}
checkType(ChecksumCRC32)
checkType(ChecksumCRC32C)
checkType(ChecksumSHA1)
checkType(ChecksumSHA256)
return t, s
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"strconv"
"sync"
"time"
"github.com/minio/minio/internal/dsync"
)
// lockRequesterInfo stores various info from the client for each lock that is requested.
type lockRequesterInfo struct {
Name string // name of the resource lock was requested for
Writer bool // Bool whether write or read lock.
UID string // UID to uniquely identify request of client.
Timestamp time.Time // Timestamp set at the time of initialization.
TimeLastRefresh time.Time // Timestamp for last lock refresh.
Source string // Contains line, function and filename requesting the lock.
Group bool // indicates if it was a group lock.
// Owner represents the UUID of the owner who originally requested the lock
// useful in expiry.
Owner string
// Quorum represents the quorum required for this lock to be active.
Quorum int
idx int
}
// isWriteLock returns whether the lock is a write or read lock.
func isWriteLock(lri []lockRequesterInfo) bool {
return len(lri) == 1 && lri[0].Writer
}
// localLocker implements Dsync.NetLocker
type localLocker struct {
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
lockUID map[string]string // UUID -> resource map.
}
func (l *localLocker) String() string {
return globalEndpoints.Localhost()
}
func (l *localLocker) canTakeLock(resources ...string) bool {
for _, resource := range resources {
_, lockTaken := l.lockMap[resource]
if lockTaken {
return false
}
}
return true
}
func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
if len(args.Resources) > maxDeleteList {
return false, fmt.Errorf("internal error: localLocker.Lock called with more than %d resources", maxDeleteList)
}
l.mutex.Lock()
defer l.mutex.Unlock()
if !l.canTakeLock(args.Resources...) {
// Not all locks can be taken on resources,
// reject it completely.
return false, nil
}
// No locks held on any of the resources, so claim write
// lock on all resources at once.
for i, resource := range args.Resources {
l.lockMap[resource] = []lockRequesterInfo{
{
Name: resource,
Writer: true,
Source: args.Source,
Owner: args.Owner,
UID: args.UID,
Timestamp: UTCNow(),
TimeLastRefresh: UTCNow(),
Group: len(args.Resources) > 1,
Quorum: args.Quorum,
idx: i,
},
}
l.lockUID[formatUUID(args.UID, i)] = resource
}
return true, nil
}
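// formatUUID appends the resource index to the lock UID, forming the key used
// in the lockUID map.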
func formatUUID(s string, idx int) string {
return s + strconv.Itoa(idx)
}
func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool, err error) {
if len(args.Resources) > maxDeleteList {
return false, fmt.Errorf("internal error: localLocker.Unlock called with more than %d resources", maxDeleteList)
}
l.mutex.Lock()
defer l.mutex.Unlock()
err = nil
for _, resource := range args.Resources {
lri, ok := l.lockMap[resource]
if ok && !isWriteLock(lri) {
// Reject the unlock unless the held lock is a write lock.
err = fmt.Errorf("unlock attempted on a read locked entity: %s", resource)
continue
}
if ok {
reply = l.removeEntry(resource, args, &lri) || reply
}
}
return
}
// removeEntry based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock
// or last read lock)
// UID and optionally owner must match for entries to be deleted.
func (l *localLocker) removeEntry(name string, args dsync.LockArgs, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid.
for index, entry := range *lri {
if entry.UID == args.UID && (args.Owner == "" || entry.Owner == args.Owner) {
if len(*lri) == 1 {
// Remove the write lock.
delete(l.lockMap, name)
} else {
// Remove the appropriate read lock.
*lri = append((*lri)[:index], (*lri)[index+1:]...)
l.lockMap[name] = *lri
}
delete(l.lockUID, formatUUID(args.UID, entry.idx))
return true
}
}
// None found return false, perhaps entry removed in previous run.
return false
}
func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
if len(args.Resources) > 1 {
return false, fmt.Errorf("internal error: localLocker.RLock called with more than one resource")
}
l.mutex.Lock()
defer l.mutex.Unlock()
resource := args.Resources[0]
lrInfo := lockRequesterInfo{
Name: resource,
Writer: false,
Source: args.Source,
Owner: args.Owner,
UID: args.UID,
Timestamp: UTCNow(),
TimeLastRefresh: UTCNow(),
Quorum: args.Quorum,
}
if lri, ok := l.lockMap[resource]; ok {
if reply = !isWriteLock(lri); reply {
// Unless there is a write lock
l.lockMap[resource] = append(l.lockMap[resource], lrInfo)
l.lockUID[formatUUID(args.UID, 0)] = resource
}
} else {
// No locks held on the given name, so claim (first) read lock
l.lockMap[resource] = []lockRequesterInfo{lrInfo}
l.lockUID[formatUUID(args.UID, 0)] = resource
reply = true
}
return reply, nil
}
func (l *localLocker) RUnlock(_ context.Context, args dsync.LockArgs) (reply bool, err error) {
if len(args.Resources) > 1 {
return false, fmt.Errorf("internal error: localLocker.RUnlock called with more than one resource")
}
l.mutex.Lock()
defer l.mutex.Unlock()
var lri []lockRequesterInfo
resource := args.Resources[0]
if lri, reply = l.lockMap[resource]; !reply {
// No lock is held on the given name
return true, nil
}
if isWriteLock(lri) {
// A write-lock is held, cannot release a read lock
return false, fmt.Errorf("RUnlock attempted on a write locked entity: %s", resource)
}
l.removeEntry(resource, args, &lri)
return reply, nil
}
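// lockStats is a summary of the locks currently held by a localLocker.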
type lockStats struct {
Total int
Writes int
Reads int
}
func (l *localLocker) stats() lockStats {
l.mutex.Lock()
defer l.mutex.Unlock()
st := lockStats{Total: len(l.lockMap)}
for _, v := range l.lockMap {
if len(v) == 0 {
continue
}
entry := v[0]
if entry.Writer {
st.Writes++
} else {
st.Reads += len(v)
}
}
return st
}
func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo {
l.mutex.Lock()
defer l.mutex.Unlock()
lockCopy := make(map[string][]lockRequesterInfo, len(l.lockMap))
for k, v := range l.lockMap {
if len(v) == 0 {
delete(l.lockMap, k)
continue
}
lockCopy[k] = append(make([]lockRequesterInfo, 0, len(v)), v...)
}
return lockCopy
}
func (l *localLocker) Close() error {
return nil
}
// IsOnline - local locker is always online.
func (l *localLocker) IsOnline() bool {
return true
}
// IsLocal - local locker returns true.
func (l *localLocker) IsLocal() bool {
return true
}
func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
l.mutex.Lock()
defer l.mutex.Unlock()
if len(args.UID) == 0 {
for _, resource := range args.Resources {
lris, ok := l.lockMap[resource]
if !ok {
continue
}
// Collect uids, so we don't mutate while we delete
uids := make([]string, 0, len(lris))
for _, lri := range lris {
uids = append(uids, lri.UID)
}
// Delete collected uids:
for _, uid := range uids {
lris, ok := l.lockMap[resource]
if !ok {
// Just to be safe, delete uuids.
for idx := 0; idx < maxDeleteList; idx++ {
mapID := formatUUID(uid, idx)
if _, ok := l.lockUID[mapID]; !ok {
break
}
delete(l.lockUID, mapID)
}
continue
}
l.removeEntry(resource, dsync.LockArgs{UID: uid}, &lris)
}
}
return true, nil
}
idx := 0
for {
mapID := formatUUID(args.UID, idx)
resource, ok := l.lockUID[mapID]
if !ok {
return idx > 0, nil
}
lris, ok := l.lockMap[resource]
if !ok {
// Unexpected inconsistency, delete.
delete(l.lockUID, mapID)
idx++
continue
}
reply = true
l.removeEntry(resource, dsync.LockArgs{UID: args.UID}, &lris)
idx++
}
}
}
func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refreshed bool, err error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
l.mutex.Lock()
defer l.mutex.Unlock()
// Check whether uid is still active.
resource, ok := l.lockUID[formatUUID(args.UID, 0)]
if !ok {
return false, nil
}
idx := 0
for {
lris, ok := l.lockMap[resource]
if !ok {
// Inconsistent. Delete UID.
delete(l.lockUID, formatUUID(args.UID, idx))
return idx > 0, nil
}
for i := range lris {
if lris[i].UID == args.UID {
lris[i].TimeLastRefresh = UTCNow()
}
}
idx++
resource, ok = l.lockUID[formatUUID(args.UID, idx)]
if !ok {
// No more resources for UID, but we did update at least one.
return true, nil
}
}
}
}
// expireOldLocks removes lock entries, similar to removeEntry, whose last
// refresh is older than the given interval. It acquires 'l.mutex' itself.
func (l *localLocker) expireOldLocks(interval time.Duration) {
l.mutex.Lock()
defer l.mutex.Unlock()
for k, lris := range l.lockMap {
modified := false
for i := 0; i < len(lris); {
lri := &lris[i]
if time.Since(lri.TimeLastRefresh) > interval {
delete(l.lockUID, formatUUID(lri.UID, lri.idx))
if len(lris) == 1 {
// Remove the write lock.
delete(l.lockMap, lri.Name)
modified = false
break
}
modified = true
// Remove the appropriate lock.
lris = append(lris[:i], lris[i+1:]...)
// Check same i
} else {
// Move to next
i++
}
}
if modified {
l.lockMap[k] = lris
}
}
}
func newLocker() *localLocker {
return &localLocker{
lockMap: make(map[string][]lockRequesterInfo, 1000),
lockUID: make(map[string]string, 1000),
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logger
import (
"context"
"errors"
"sync"
"time"
)
// LogOnce provides the function type for logger.LogOnceIf() function
type LogOnce func(ctx context.Context, err error, id string, errKind ...interface{})
type onceErr struct {
Err error
Count int
}
// Holds a map of recently logged errors.
type logOnceType struct {
IDMap map[string]onceErr
sync.Mutex
}
func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
if err == nil {
return
}
nerr := unwrapErrs(err)
l.Lock()
shouldLog := true
prev, ok := l.IDMap[id]
if !ok {
l.IDMap[id] = onceErr{
Err: nerr,
Count: 1,
}
} else if prev.Err.Error() == nerr.Error() {
// if errors are equal do not log.
prev.Count++
l.IDMap[id] = prev
shouldLog = false
}
l.Unlock()
if shouldLog {
consoleLogIf(ctx, err, errKind...)
}
}
const unwrapErrsDepth = 3
// unwrapErrs unwraps the error up to the point where errors.Unwrap(err) returns nil
func unwrapErrs(err error) (leafErr error) {
uerr := errors.Unwrap(err)
depth := 1
for uerr != nil {
// Save the current `uerr`
leafErr = uerr
// continue to look for leaf errors underneath
uerr = errors.Unwrap(leafErr)
depth++
if depth == unwrapErrsDepth {
// If we have reached enough depth, stop
// recursing further; this avoids any
// unnecessary latency it might bring.
break
}
}
if uerr == nil {
leafErr = err
}
return leafErr
}
// One log message per error.
func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
if err == nil {
return
}
nerr := unwrapErrs(err)
l.Lock()
shouldLog := true
prev, ok := l.IDMap[id]
if !ok {
l.IDMap[id] = onceErr{
Err: nerr,
Count: 1,
}
} else if prev.Err.Error() == nerr.Error() {
// if errors are equal do not log.
prev.Count++
l.IDMap[id] = prev
shouldLog = false
}
l.Unlock()
if shouldLog {
logIf(ctx, err, errKind...)
}
}
// Clean up the map every hour so that the log message is printed again for the user to notice.
func (l *logOnceType) cleanupRoutine() {
for {
time.Sleep(time.Hour)
l.Lock()
l.IDMap = make(map[string]onceErr)
l.Unlock()
}
}
// Returns logOnceType
func newLogOnceType() *logOnceType {
l := &logOnceType{IDMap: make(map[string]onceErr)}
go l.cleanupRoutine()
return l
}
var logOnce = newLogOnceType()
// LogOnceIf - Logs notification errors - once per error.
// id is a unique identifier for related log messages, refer to cmd/notification.go
// on how it is used.
func LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
if logIgnoreError(err) {
return
}
logOnce.logOnceIf(ctx, err, id, errKind...)
}
// LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target.
func LogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
if logIgnoreError(err) {
return
}
logOnce.logOnceConsoleIf(ctx, err, id, errKind...)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lifecycle
import (
"encoding/xml"
"testing"
)
func TestTransitionUnmarshalXML(t *testing.T) {
trTests := []struct {
input string
err error
}{
{
input: `<Transition>
<Days>0</Days>
<StorageClass>S3TIER-1</StorageClass>
</Transition>`,
err: nil,
},
{
input: `<Transition>
<Days>1</Days>
<Date>2021-01-01T00:00:00Z</Date>
<StorageClass>S3TIER-1</StorageClass>
</Transition>`,
err: errTransitionInvalid,
},
{
input: `<Transition>
<Days>1</Days>
</Transition>`,
err: errXMLNotWellFormed,
},
}
for i, tc := range trTests {
var tr Transition
err := xml.Unmarshal([]byte(tc.input), &tr)
if err != nil {
t.Fatalf("%d: xml unmarshal failed with %v", i+1, err)
}
if err = tr.Validate(); err != tc.err {
t.Fatalf("%d: Invalid transition %v: err %v", i+1, tr, err)
}
}
ntrTests := []struct {
input string
err error
}{
{
input: `<NoncurrentVersionTransition>
<NoncurrentDays>0</NoncurrentDays>
<StorageClass>S3TIER-1</StorageClass>
</NoncurrentVersionTransition>`,
err: nil,
},
{
input: `<NoncurrentVersionTransition>
<Days>1</Days>
</NoncurrentVersionTransition>`,
err: errXMLNotWellFormed,
},
}
for i, tc := range ntrTests {
var ntr NoncurrentVersionTransition
err := xml.Unmarshal([]byte(tc.input), &ntr)
if err != nil {
t.Fatalf("%d: xml unmarshal failed with %v", i+1, err)
}
if err = ntr.Validate(); err != tc.err {
t.Fatalf("%d: Invalid noncurrent version transition %v: err %v", i+1, ntr, err)
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package arn
import (
"fmt"
"regexp"
"strings"
)
// ARN structure:
//
// arn:partition:service:region:account-id:resource-type/resource-id
//
// In this implementation, account-id is empty.
//
// Reference: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
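// For illustration (hypothetical region and resource ID), an IAM role ARN
// produced by this package looks like:
//
//   arn:minio:iam:us-east-1::role/my-role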
type arnPartition string
const (
arnPartitionMinio arnPartition = "minio"
)
type arnService string
const (
arnServiceIAM arnService = "iam"
)
type arnResourceType string
const (
arnResourceTypeRole arnResourceType = "role"
)
// ARN - representation of resources based on AWS ARNs.
type ARN struct {
Partition arnPartition
Service arnService
Region string
ResourceType arnResourceType
ResourceID string
}
// Allows English letters, numbers, '.', '-', '_' and '/'. Starts with a
// letter or digit. At least 1 character long.
var validResourceIDRegex = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9_/\.-]*$`)
// NewIAMRoleARN - returns an ARN for a role in MinIO.
func NewIAMRoleARN(resourceID, serverRegion string) (ARN, error) {
if !validResourceIDRegex.MatchString(resourceID) {
return ARN{}, fmt.Errorf("Invalid resource ID: %s", resourceID)
}
return ARN{
Partition: arnPartitionMinio,
Service: arnServiceIAM,
Region: serverRegion,
ResourceType: arnResourceTypeRole,
ResourceID: resourceID,
}, nil
}
// String - returns string representation of the ARN.
func (arn ARN) String() string {
return strings.Join(
[]string{
"arn",
string(arn.Partition),
string(arn.Service),
arn.Region,
"", // account-id is always empty in this implementation
string(arn.ResourceType) + "/" + arn.ResourceID,
},
":",
)
}
// Parse - parses an ARN string into a type.
func Parse(arnStr string) (arn ARN, err error) {
ps := strings.Split(arnStr, ":")
if len(ps) != 6 ||
ps[0] != "arn" {
err = fmt.Errorf("Invalid ARN string format")
return
}
if ps[1] != string(arnPartitionMinio) {
err = fmt.Errorf("Invalid ARN - bad partition field")
return
}
if ps[2] != string(arnServiceIAM) {
err = fmt.Errorf("Invalid ARN - bad service field")
return
}
// ps[3] is region and is not validated here. If the region is invalid,
// the ARN would not match any configured ARNs in the server.
if ps[4] != "" {
err = fmt.Errorf("Invalid ARN - unsupported account-id field")
return
}
res := strings.SplitN(ps[5], "/", 2)
if len(res) != 2 {
err = fmt.Errorf("Invalid ARN - resource does not contain a \"/\"")
return
}
if res[0] != string(arnResourceTypeRole) {
err = fmt.Errorf("Invalid ARN: resource type is invalid.")
return
}
if !validResourceIDRegex.MatchString(res[1]) {
err = fmt.Errorf("Invalid resource ID: %s", res[1])
return
}
arn = ARN{
Partition: arnPartitionMinio,
Service: arnServiceIAM,
Region: ps[3],
ResourceType: arnResourceTypeRole,
ResourceID: res[1],
}
return
}
<file_sep># Casdoor Quickstart Guide [](https://slack.min.io)
Casdoor is a UI-first centralized authentication / Single-Sign-On (SSO) platform supporting OAuth 2.0, OIDC and SAML, integrated with Casbin RBAC and ABAC permission management. This document covers configuring Casdoor identity provider support with MinIO.
## Prerequisites
Install and configure the Casdoor server by following [Casdoor Server Installation](https://casdoor.org/docs/basic/server-installation).
For a quick installation, docker-compose reference configs are also available in [Casdoor Try with Docker](https://casdoor.org/docs/basic/try-with-docker).
### Configure Casdoor
- Go to Applications
- Create or use an existing Casdoor application
- Edit the application
- Copy `Client ID` and `Client secret`
- Add your redirect url (callback url) to `Redirect URLs`
- Save
- Go to Users
- Edit the user
- Add your MinIO policy (ex: `readwrite`) in `Tag`
- Save
- Open your favorite browser and visit **http://`CASDOOR_ENDPOINT`/.well-known/openid-configuration**; you will see the OIDC configuration of Casdoor (see the example below).
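You can also inspect the discovery document from a shell; the endpoint below is a placeholder, and `jq` is only used for pretty-printing:
```
curl -s "http://CASDOOR_ENDPOINT/.well-known/openid-configuration" | jq .
```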
### Configure MinIO
```
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>
minio server /mnt/export
```
Here are all the available options to configure OpenID connect
```
mc admin config set myminio/ identity_openid
KEY:
identity_openid enable OpenID SSO support
ARGS:
config_url* (url) openid discovery document e.g. "https://accounts.google.com/.well-known/openid-configuration"
client_id (string) unique public identifier for apps e.g. "292085223830.apps.googleusercontent.com"
claim_name (string) JWT canned policy claim name, defaults to "policy"
claim_prefix (string) JWT claim namespace prefix e.g. "customer1/"
scopes (csv) Comma separated list of OpenID scopes for server, defaults to advertised scopes from discovery document e.g. "email,admin"
comment (sentence) optionally add a comment to this setting
```
and ENV based options
```
mc admin config set myminio/ identity_openid --env
KEY:
identity_openid enable OpenID SSO support
ARGS:
MINIO_IDENTITY_OPENID_CONFIG_URL* (url) openid discovery document e.g. "https://accounts.google.com/.well-known/openid-configuration"
MINIO_IDENTITY_OPENID_CLIENT_ID (string) unique public identifier for apps e.g. "292085223830.apps.googleusercontent.com"
MINIO_IDENTITY_OPENID_CLAIM_NAME (string) JWT canned policy claim name, defaults to "policy"
MINIO_IDENTITY_OPENID_CLAIM_PREFIX (string) JWT claim namespace prefix e.g. "customer1/"
MINIO_IDENTITY_OPENID_SCOPES (csv) Comma separated list of OpenID scopes for server, defaults to advertised scopes from discovery document e.g. "email,admin"
MINIO_IDENTITY_OPENID_COMMENT (sentence) optionally add a comment to this setting
```
Set the `identity_openid` config with `config_url` and `client_id`, then restart MinIO
```
~ mc admin config set myminio identity_openid config_url="http://CASDOOR_ENDPOINT/.well-known/openid-configuration" client_id=<client id> client_secret=<client secret> claim_name="tag"
```
> NOTE: As MinIO needs to use a claim attribute in the JWT for its policy, you should configure it in Casdoor as well. Currently, Casdoor uses `tag` as a workaround for configuring MinIO's policy.
Once successfully set, restart the MinIO instance.
```
mc admin service restart myminio
```
### Using WebIdentity API
In another terminal, run `web-identity.go`, a sample client application that obtains JWT id_tokens from an identity provider, in our case Casdoor. It uses the returned id_token response to get new temporary credentials from the MinIO server using the STS API call `AssumeRoleWithWebIdentity`.
```
$ go run docs/sts/web-identity.go -cid account -csec 072e7f00-4289-469c-9ab2-bbe843c7f5a8 -config-ep "http://CASDOOR_ENDPOINT/.well-known/openid-configuration" -port 8888
2018/12/26 17:49:36 listening on http://localhost:8888/
```
This will open the login page of Casdoor. Upon successful login, the STS credentials, along with any buckets discovered using those credentials, will be printed on the screen, for example:
```
{
buckets: [ ],
credentials: {
AccessKeyID: "<KEY>",
SecretAccessKey: "<KEY>",
SessionToken: "<KEY>",
SignerType: 1
}
}
```
### Using MinIO Console
- Open the MinIO URL in the browser, let's say <http://localhost:9000/>
- Click on `Login with SSO`
- The user will be redirected to the Casdoor login page; upon successful login, the user will be redirected to the MinIO page and logged in automatically.
The user should now see the buckets and objects they have access to.
## Explore Further
- [Casdoor MinIO Integration](https://casdoor.org/docs/integration/minio)
- [MinIO STS Quickstart Guide](https://min.io/docs/minio/linux/developers/security-token-service.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"sync"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
)
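// fanOutOptions holds the optional encryption and checksum settings applied to
// each fanned-out PUT operation.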
type fanOutOptions struct {
Kind crypto.Type
KeyID string
Key []byte
KmsCtx kms.Context
Checksum *hash.Checksum
}
// fanOutPutObject takes an input source reader and fans out multiple PUT operations
// based on the incoming fan-out request; a context cancelation by the caller
// ensures that all fan-out operations are canceled.
func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, fanOutEntries []minio.PutObjectFanOutEntry, fanOutBuf []byte, opts fanOutOptions) ([]ObjectInfo, []error) {
errs := make([]error, len(fanOutEntries))
objInfos := make([]ObjectInfo, len(fanOutEntries))
var wg sync.WaitGroup
for i, req := range fanOutEntries {
wg.Add(1)
go func(idx int, req minio.PutObjectFanOutEntry) {
defer wg.Done()
objInfos[idx] = ObjectInfo{Name: req.Key}
hr, err := hash.NewReader(bytes.NewReader(fanOutBuf), int64(len(fanOutBuf)), "", "", -1)
if err != nil {
errs[idx] = err
return
}
reader := NewPutObjReader(hr)
defer func() {
if err := reader.Close(); err != nil {
errs[idx] = err
}
if err := hr.Close(); err != nil {
errs[idx] = err
}
}()
userDefined := make(map[string]string, len(req.UserMetadata))
for k, v := range req.UserMetadata {
userDefined[k] = v
}
userDefined[xhttp.AmzObjectTagging] = s3utils.TagEncode(req.UserTags)
if opts.Kind != nil {
encrd, objectEncryptionKey, err := newEncryptReader(ctx, hr, opts.Kind, opts.KeyID, opts.Key, bucket, req.Key, userDefined, opts.KmsCtx)
if err != nil {
errs[idx] = err
return
}
// do not try to verify encrypted content
hr, err = hash.NewReader(encrd, -1, "", "", -1)
if err != nil {
errs[idx] = err
return
}
reader, err = reader.WithEncryption(hr, &objectEncryptionKey)
if err != nil {
errs[idx] = err
return
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, req.Key, reader, ObjectOptions{
Versioned: globalBucketVersioningSys.PrefixEnabled(bucket, req.Key),
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(bucket, req.Key),
UserDefined: userDefined,
})
if err != nil {
errs[idx] = err
return
}
objInfos[idx] = objInfo
}(i, req)
}
wg.Wait()
return objInfos, errs
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package target
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"strings"
"sync/atomic"
"syscall"
"time"
"github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/certs"
xnet "github.com/minio/pkg/net"
)
// Webhook constants
const (
WebhookEndpoint = "endpoint"
WebhookAuthToken = "<PASSWORD>token"
WebhookClientCert = "client_cert"
WebhookClientKey = "client_key"
EnvWebhookEnable = "MINIO_LAMBDA_WEBHOOK_ENABLE"
EnvWebhookEndpoint = "MINIO_LAMBDA_WEBHOOK_ENDPOINT"
EnvWebhookAuthToken = "MINIO_LAMBDA_WEBHOOK_AUTH_TOKEN"
EnvWebhookClientCert = "MINIO_LAMBDA_WEBHOOK_CLIENT_CERT"
EnvWebhookClientKey = "MINIO_LAMBDA_WEBHOOK_CLIENT_KEY"
)
// WebhookArgs - Webhook target arguments.
type WebhookArgs struct {
Enable bool `json:"enable"`
Endpoint xnet.URL `json:"endpoint"`
AuthToken string `json:"authToken"`
Transport *http.Transport `json:"-"`
ClientCert string `json:"clientCert"`
ClientKey string `json:"clientKey"`
}
// Validate WebhookArgs fields
func (w WebhookArgs) Validate() error {
if !w.Enable {
return nil
}
if w.Endpoint.IsEmpty() {
return errors.New("endpoint empty")
}
if w.ClientCert != "" && w.ClientKey == "" || w.ClientCert == "" && w.ClientKey != "" {
return errors.New("cert and key must be specified as a pair")
}
return nil
}
// WebhookTarget - Webhook target.
type WebhookTarget struct {
activeRequests int64
totalRequests int64
failedRequests int64
lazyInit lazyInit
id event.TargetID
args WebhookArgs
transport *http.Transport
httpClient *http.Client
loggerOnce logger.LogOnce
cancel context.CancelFunc
cancelCh <-chan struct{}
}
// ID - returns target ID.
func (target *WebhookTarget) ID() event.TargetID {
return target.id
}
// IsActive - Return true if target is up and active
func (target *WebhookTarget) IsActive() (bool, error) {
if err := target.init(); err != nil {
return false, err
}
return target.isActive()
}
// errNotConnected - indicates that the target connection is not active.
var errNotConnected = errors.New("not connected to target server/service")
func (target *WebhookTarget) isActive() (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodHead, target.args.Endpoint.String(), nil)
if err != nil {
if xnet.IsNetworkOrHostDown(err, false) {
return false, errNotConnected
}
return false, err
}
tokens := strings.Fields(target.args.AuthToken)
switch len(tokens) {
case 2:
req.Header.Set("Authorization", target.args.AuthToken)
case 1:
req.Header.Set("Authorization", "Bearer "+target.args.AuthToken)
}
resp, err := target.httpClient.Do(req)
if err != nil {
if xnet.IsNetworkOrHostDown(err, true) {
return false, errNotConnected
}
return false, err
}
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
// No network failure, i.e. a response from the target means it is up
return true, nil
}
// Stat - returns lambda webhook target statistics such as
// current calls in progress, successfully completed functions and
// failed functions.
func (target *WebhookTarget) Stat() event.TargetStat {
return event.TargetStat{
ID: target.id,
ActiveRequests: atomic.LoadInt64(&target.activeRequests),
TotalRequests: atomic.LoadInt64(&target.totalRequests),
FailedRequests: atomic.LoadInt64(&target.failedRequests),
}
}
// Send - sends an event to the webhook.
func (target *WebhookTarget) Send(eventData event.Event) (resp *http.Response, err error) {
atomic.AddInt64(&target.activeRequests, 1)
defer atomic.AddInt64(&target.activeRequests, -1)
atomic.AddInt64(&target.totalRequests, 1)
defer func() {
if err != nil {
atomic.AddInt64(&target.failedRequests, 1)
}
}()
if err = target.init(); err != nil {
return nil, err
}
data, err := json.Marshal(eventData)
if err != nil {
return nil, err
}
req, err := http.NewRequest(http.MethodPost, target.args.Endpoint.String(), bytes.NewReader(data))
if err != nil {
return nil, err
}
// Check whether the authToken is already in
// '<Key> <Token>' format; if it is, use the
// authToken as-is instead of prefixing it
// with 'Bearer'.
tokens := strings.Fields(target.args.AuthToken)
switch len(tokens) {
case 2:
req.Header.Set("Authorization", target.args.AuthToken)
case 1:
req.Header.Set("Authorization", "Bearer "+target.args.AuthToken)
}
req.Header.Set("Content-Type", "application/json")
return target.httpClient.Do(req)
}
// Close the target. Will cancel all active requests.
func (target *WebhookTarget) Close() error {
target.cancel()
return nil
}
func (target *WebhookTarget) init() error {
return target.lazyInit.Do(target.initWebhook)
}
// Only called from init()
func (target *WebhookTarget) initWebhook() error {
args := target.args
transport := target.transport
if args.ClientCert != "" && args.ClientKey != "" {
manager, err := certs.NewManager(context.Background(), args.ClientCert, args.ClientKey, tls.LoadX509KeyPair)
if err != nil {
return err
}
manager.ReloadOnSignal(syscall.SIGHUP) // allow reloads upon SIGHUP
transport.TLSClientConfig.GetClientCertificate = manager.GetClientCertificate
}
target.httpClient = &http.Client{Transport: transport}
yes, err := target.isActive()
if err != nil {
return err
}
if !yes {
return errNotConnected
}
return nil
}
// NewWebhookTarget - creates new Webhook target.
func NewWebhookTarget(ctx context.Context, id string, args WebhookArgs, loggerOnce logger.LogOnce, transport *http.Transport) (*WebhookTarget, error) {
ctx, cancel := context.WithCancel(ctx)
target := &WebhookTarget{
id: event.TargetID{ID: id, Name: "webhook"},
args: args,
loggerOnce: loggerOnce,
transport: transport,
cancel: cancel,
cancelCh: ctx.Done(),
}
return target, nil
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package ioutil implements some I/O utility functions which are not covered
// by the standard library.
package ioutil
import (
"errors"
"io"
)
// ErrOverread is returned to the reader when the hard limit of HardLimitReader is exceeded.
var ErrOverread = errors.New("input provided more bytes than specified")
// HardLimitReader returns a Reader that reads from r
// but returns an error if the source provides more data than allowed.
// This means the source *will* be overread unless EOF is returned prior.
// The underlying implementation is a *HardLimitedReader.
// This will ensure that at most n bytes are returned and EOF is reached.
func HardLimitReader(r io.Reader, n int64) io.Reader { return &HardLimitedReader{r, n} }
// A HardLimitedReader reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
// Read returns ErrOverread once more than N bytes have been read from R; otherwise the error from R (including EOF) is passed through.
type HardLimitedReader struct {
R io.Reader // underlying reader
N int64 // max bytes remaining
}
func (l *HardLimitedReader) Read(p []byte) (n int, err error) {
if l.N < 0 {
return 0, ErrOverread
}
n, err = l.R.Read(p)
l.N -= int64(n)
if l.N < 0 {
return 0, ErrOverread
}
return
}
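// A minimal usage sketch from a caller's perspective (illustrative names):
//
//   r := ioutil.HardLimitReader(src, expectedSize)
//   if _, err := io.Copy(dst, r); errors.Is(err, ioutil.ErrOverread) {
//           // src provided more than expectedSize bytes; reject the input.
//   }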
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/subtle"
"fmt"
"io"
"net"
"strconv"
"strings"
"github.com/minio/cli"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/pkg/sftp"
ftp "goftp.io/server/v2"
"golang.org/x/crypto/ssh"
)
// minioLogger - use an instance of this to log in a standard format
type minioLogger struct{}
// Print implements Logger
func (log *minioLogger) Print(sessionID string, message interface{}) {
if serverDebugLog {
logger.Info("%s %s", sessionID, message)
}
}
// Printf implements Logger
func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) {
if serverDebugLog {
if sessionID != "" {
logger.Info("%s %s", sessionID, fmt.Sprintf(format, v...))
} else {
logger.Info(format, v...)
}
}
}
// PrintCommand implements Logger
func (log *minioLogger) PrintCommand(sessionID string, command string, params string) {
if serverDebugLog {
if command == "PASS" {
logger.Info("%s > PASS ****", sessionID)
} else {
logger.Info("%s > %s %s", sessionID, command, params)
}
}
}
// PrintResponse implements Logger
func (log *minioLogger) PrintResponse(sessionID string, code int, message string) {
if serverDebugLog {
logger.Info("%s < %d %s", sessionID, code, message)
}
}
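// startSFTPServer starts an SFTP server based on the --sftp flags, for example
// (illustrative values):
//
//   minio server /data --sftp="address=:8022" --sftp="ssh-private-key=/home/user/.ssh/id_ecdsa"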
func startSFTPServer(c *cli.Context) {
args := c.StringSlice("sftp")
var (
port int
publicIP string
sshPrivateKey string
)
var err error
for _, arg := range args {
tokens := strings.SplitN(arg, "=", 2)
if len(tokens) != 2 {
logger.Fatal(fmt.Errorf("invalid arguments passed to --sftp=%s", arg), "unable to start SFTP server")
}
switch tokens[0] {
case "address":
host, portStr, err := net.SplitHostPort(tokens[1])
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed to --sftp=%s (%v)", arg, err), "unable to start SFTP server")
}
port, err = strconv.Atoi(portStr)
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed to --sftp=%s (%v)", arg, err), "unable to start SFTP server")
}
if port < 1 || port > 65535 {
logger.Fatal(fmt.Errorf("invalid arguments passed to --sftp=%s, (port number must be between 1 to 65535)", arg), "unable to start SFTP server")
}
publicIP = host
case "ssh-private-key":
sshPrivateKey = tokens[1]
}
}
if port == 0 {
port = 8022 // Default SFTP port, since no port was given.
}
if sshPrivateKey == "" {
logger.Fatal(fmt.Errorf("invalid arguments passed, private key file is mandatory for --sftp='ssh-private-key=path/to/id_ecdsa'"), "unable to start SFTP server")
}
privateBytes, err := ioutil.ReadFile(sshPrivateKey)
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed, private key file is not accessible: %v", err), "unable to start SFTP server")
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed, private key file is not parseable: %v", err), "unable to start SFTP server")
}
// An SSH server is represented by a ServerConfig, which holds
// certificate details and handles authentication of ServerConns.
config := &ssh.ServerConfig{
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
ui, ok := globalIAMSys.GetUser(context.Background(), c.User())
if !ok {
return nil, errNoSuchUser
}
if subtle.ConstantTimeCompare([]byte(ui.Credentials.SecretKey), pass) == 1 {
return &ssh.Permissions{
CriticalOptions: map[string]string{
"accessKey": c.User(),
},
Extensions: make(map[string]string),
}, nil
}
return nil, errAuthentication
},
}
config.AddHostKey(private)
// Once a ServerConfig has been configured, connections can be accepted.
listener, err := net.Listen("tcp", net.JoinHostPort(publicIP, strconv.Itoa(port)))
if err != nil {
logger.Fatal(err, "unable to start listening on --sftp='port=%d'", port)
}
logger.Info(fmt.Sprintf("MinIO SFTP Server listening on %s", net.JoinHostPort(publicIP, strconv.Itoa(port))))
for {
nConn, err := listener.Accept()
if err != nil {
logger.LogIf(context.Background(), err)
continue
}
// Before use, a handshake must be performed on the incoming net.Conn.
sconn, chans, reqs, err := ssh.NewServerConn(nConn, config)
if err != nil {
logger.LogIf(context.Background(), err)
continue
}
// The incoming Request channel must be serviced.
go ssh.DiscardRequests(reqs)
// Service the incoming Channel channel.
for newChannel := range chans {
// Channels have a type, depending on the application level
// protocol intended. In the case of an SFTP session, this is "subsystem"
// with a payload string of "<length=4>sftp"
if newChannel.ChannelType() != "session" {
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
continue
}
channel, requests, err := newChannel.Accept()
if err != nil {
logger.Fatal(err, "unable to accept the connection requests channel")
}
// Sessions have out-of-band requests such as "shell",
// "pty-req" and "env". Here we handle only the
// "subsystem" request.
go func(in <-chan *ssh.Request) {
for req := range in {
// We only reply to SSH packets that have `sftp` payload.
req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
}
}(requests)
server := sftp.NewRequestServer(channel, NewSFTPDriver(sconn.Permissions))
if err := server.Serve(); err == io.EOF {
server.Close()
} else if err != nil {
logger.Fatal(err, "unable to start SFTP server")
}
}
}
}
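// Illustrative invocation sketch (derived from the flag parsing below; paths and
// the passive port range are placeholders): the FTP server is enabled with
// repeated --ftp key=value arguments, for example:
//
//	minio server /data --ftp="address=:8021" --ftp="passive-port-range=30000-40000" \
//	      --ftp="tls-private-key=path/to/private.key" --ftp="tls-public-cert=path/to/public.crt"
//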
func startFTPServer(c *cli.Context) {
args := c.StringSlice("ftp")
var (
port int
publicIP string
portRange string
tlsPrivateKey string
tlsPublicCert string
)
var err error
for _, arg := range args {
tokens := strings.SplitN(arg, "=", 2)
if len(tokens) != 2 {
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s", arg), "unable to start FTP server")
}
switch tokens[0] {
case "address":
host, portStr, err := net.SplitHostPort(tokens[1])
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s (%v)", arg, err), "unable to start FTP server")
}
port, err = strconv.Atoi(portStr)
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s (%v)", arg, err), "unable to start FTP server")
}
if port < 1 || port > 65535 {
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s, (port number must be between 1 to 65535)", arg), "unable to start FTP server")
}
publicIP = host
case "passive-port-range":
portRange = tokens[1]
case "tls-private-key":
tlsPrivateKey = tokens[1]
case "tls-public-cert":
tlsPublicCert = tokens[1]
}
}
// Verify if only partial inputs are given for FTP(secure)
{
if tlsPrivateKey == "" && tlsPublicCert != "" {
logger.Fatal(fmt.Errorf("invalid TLS arguments provided missing private key --ftp=\"tls-private-key=path/to/private.key\""), "unable to start FTP server")
}
if tlsPrivateKey != "" && tlsPublicCert == "" {
logger.Fatal(fmt.Errorf("invalid TLS arguments provided missing public cert --ftp=\"tls-public-cert=path/to/public.crt\""), "unable to start FTP server")
}
if port == 0 {
port = 8021 // Default FTP port, since no port was given.
}
}
// If no TLS certs were provided and the server is running with TLS for the S3 API,
// we automatically make FTP also run under TLS mode.
if globalIsTLS && tlsPrivateKey == "" && tlsPublicCert == "" {
tlsPrivateKey = getPrivateKeyFile()
tlsPublicCert = getPublicCertFile()
}
tls := tlsPrivateKey != "" && tlsPublicCert != ""
name := "MinIO FTP Server"
if tls {
name = "MinIO FTP(Secure) Server"
}
ftpServer, err := ftp.NewServer(&ftp.Options{
Name: name,
WelcomeMessage: fmt.Sprintf("Welcome to MinIO FTP Server Version='%s' License='GNU AGPLv3'", Version),
Driver: NewFTPDriver(),
Port: port,
Perm: ftp.NewSimplePerm("nobody", "nobody"),
TLS: tls,
KeyFile: tlsPrivateKey,
CertFile: tlsPublicCert,
ExplicitFTPS: tls,
Logger: &minioLogger{},
PassivePorts: portRange,
PublicIP: publicIP,
})
if err != nil {
logger.Fatal(err, "unable to initialize FTP server")
}
logger.Info(fmt.Sprintf("%s listening on %s", name, net.JoinHostPort(publicIP, strconv.Itoa(port))))
if err = ftpServer.ListenAndServe(); err != nil {
logger.Fatal(err, "unable to start FTP server")
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"strings"
"sync/atomic"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/disk"
ioutilx "github.com/minio/minio/internal/ioutil"
)
//go:generate stringer -type=osMetric -trimprefix=osMetric $GOFILE
type osMetric uint8
const (
osMetricRemoveAll osMetric = iota
osMetricMkdirAll
osMetricMkdir
osMetricRename
osMetricOpenFileW
osMetricOpenFileR
osMetricOpen
osMetricOpenFileDirectIO
osMetricLstat
osMetricRemove
osMetricStat
osMetricAccess
osMetricCreate
osMetricReadDirent
osMetricFdatasync
osMetricSync
// .... add more
osMetricLast
)
var globalOSMetrics osMetrics
func init() {
// Inject metrics.
ioutilx.OsOpenFile = OpenFile
ioutilx.OpenFileDirectIO = OpenFileDirectIO
ioutilx.OsOpen = Open
}
type osMetrics struct {
// All fields must be accessed atomically and aligned.
operations [osMetricLast]uint64
latency [osMetricLast]lockedLastMinuteLatency
}
// time an os action.
func (o *osMetrics) time(s osMetric) func() {
startTime := time.Now()
return func() {
duration := time.Since(startTime)
atomic.AddUint64(&o.operations[s], 1)
o.latency[s].add(duration)
}
}
// incTime will increment time on metric s with a specific duration.
func (o *osMetrics) incTime(s osMetric, d time.Duration) {
atomic.AddUint64(&o.operations[s], 1)
o.latency[s].add(d)
}
func osTrace(s osMetric, startTime time.Time, duration time.Duration, path string) madmin.TraceInfo {
return madmin.TraceInfo{
TraceType: madmin.TraceOS,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: "os." + s.String(),
Duration: duration,
Path: path,
}
}
func updateOSMetrics(s osMetric, paths ...string) func() {
if globalTrace.NumSubscribers(madmin.TraceOS) == 0 {
return globalOSMetrics.time(s)
}
startTime := time.Now()
return func() {
duration := time.Since(startTime)
globalOSMetrics.incTime(s, duration)
globalTrace.Publish(osTrace(s, startTime, duration, strings.Join(paths, " -> ")))
}
}
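// Note on the wrapper pattern below (explanatory, not part of the original
// source): updateOSMetrics returns a closure, so the trailing () in
// `defer updateOSMetrics(osMetricStat, name)()` is intentional - the timer
// starts when the wrapper is entered and the returned func records the
// operation count and latency when the wrapper returns.
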
// RemoveAll captures time taken to call the underlying os.RemoveAll
func RemoveAll(dirPath string) error {
defer updateOSMetrics(osMetricRemoveAll, dirPath)()
return os.RemoveAll(dirPath)
}
// Mkdir captures time taken to call os.Mkdir
func Mkdir(dirPath string, mode os.FileMode) error {
defer updateOSMetrics(osMetricMkdir, dirPath)()
return os.Mkdir(dirPath, mode)
}
// MkdirAll captures time taken to call os.MkdirAll
func MkdirAll(dirPath string, mode os.FileMode) error {
defer updateOSMetrics(osMetricMkdirAll, dirPath)()
return osMkdirAll(dirPath, mode)
}
// Rename captures time taken to call os.Rename
func Rename(src, dst string) error {
defer updateOSMetrics(osMetricRename, src, dst)()
return os.Rename(src, dst)
}
// OpenFile captures time taken to call os.OpenFile
func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
switch flag & writeMode {
case writeMode:
defer updateOSMetrics(osMetricOpenFileW, name)()
default:
defer updateOSMetrics(osMetricOpenFileR, name)()
}
return os.OpenFile(name, flag, perm)
}
// Access captures time taken to call syscall.Access();
// on windows, plan9 and solaris, syscall.Access uses
// os.Lstat().
func Access(name string) error {
defer updateOSMetrics(osMetricAccess, name)()
return access(name)
}
// Open captures time taken to call os.Open
func Open(name string) (*os.File, error) {
defer updateOSMetrics(osMetricOpen, name)()
return os.Open(name)
}
// OpenFileDirectIO captures time taken to call disk.OpenFileDirectIO
func OpenFileDirectIO(name string, flag int, perm os.FileMode) (*os.File, error) {
defer updateOSMetrics(osMetricOpenFileDirectIO, name)()
return disk.OpenFileDirectIO(name, flag, perm)
}
// Lstat captures time taken to call os.Lstat
func Lstat(name string) (os.FileInfo, error) {
defer updateOSMetrics(osMetricLstat, name)()
return os.Lstat(name)
}
// Remove captures time taken to call os.Remove
func Remove(deletePath string) error {
defer updateOSMetrics(osMetricRemove, deletePath)()
return os.Remove(deletePath)
}
// Stat captures time taken to call os.Stat
func Stat(name string) (os.FileInfo, error) {
defer updateOSMetrics(osMetricStat, name)()
return os.Stat(name)
}
// Create captures time taken to call os.Create
func Create(name string) (*os.File, error) {
defer updateOSMetrics(osMetricCreate, name)()
return os.Create(name)
}
// Fdatasync captures time taken to call Fdatasync
func Fdatasync(f *os.File) error {
fn := ""
if f != nil {
fn = f.Name()
}
defer updateOSMetrics(osMetricFdatasync, fn)()
return disk.Fdatasync(f)
}
// report returns all os metrics.
func (o *osMetrics) report() madmin.OSMetrics {
var m madmin.OSMetrics
m.CollectedAt = time.Now()
m.LifeTimeOps = make(map[string]uint64, osMetricLast)
for i := osMetric(0); i < osMetricLast; i++ {
if n := atomic.LoadUint64(&o.operations[i]); n > 0 {
m.LifeTimeOps[i.String()] = n
}
}
if len(m.LifeTimeOps) == 0 {
m.LifeTimeOps = nil
}
m.LastMinute.Operations = make(map[string]madmin.TimedAction, osMetricLast)
for i := osMetric(0); i < osMetricLast; i++ {
lm := o.latency[i].total()
if lm.N > 0 {
m.LastMinute.Operations[i.String()] = lm.asTimedAction()
}
}
if len(m.LastMinute.Operations) == 0 {
m.LastMinute.Operations = nil
}
return m
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/gob"
"errors"
"io"
"net/url"
"strconv"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/rest"
"github.com/minio/pkg/sync/errgroup"
"golang.org/x/exp/slices"
)
var errPeerOffline = errors.New("peer is offline")
type peerS3Client interface {
ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error)
GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error)
MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error
DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error
GetHost() string
SetPools([]int)
GetPools() []int
}
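// Compile-time interface checks (illustrative sketch, not part of the original
// source): both the local and the remote client defined below are expected to
// satisfy peerS3Client.
//
//	var (
//		_ peerS3Client = (*localPeerS3Client)(nil)
//		_ peerS3Client = (*remotePeerS3Client)(nil)
//	)
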
type localPeerS3Client struct {
host string
pools []int
}
func (l *localPeerS3Client) GetHost() string {
return l.host
}
func (l *localPeerS3Client) SetPools(p []int) {
l.pools = make([]int, len(p))
copy(l.pools, p)
}
func (l localPeerS3Client) GetPools() []int {
return l.pools
}
func (l localPeerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
return listBucketsLocal(ctx, opts)
}
func (l localPeerS3Client) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
return getBucketInfoLocal(ctx, bucket, opts)
}
func (l localPeerS3Client) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error {
return makeBucketLocal(ctx, bucket, opts)
}
func (l localPeerS3Client) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
return deleteBucketLocal(ctx, bucket, opts)
}
// client to talk to peer Nodes.
type remotePeerS3Client struct {
host string
pools []int
restClient *rest.Client
}
// Wrapper to restClient.Call to handle network errors; in case of a network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer via xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *remotePeerS3Client) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(GlobalContext, method, values, body, length)
}
// Wrapper to restClient.Call to handle network errors; in case of a network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer via xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *remotePeerS3Client) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
if values == nil {
values = make(url.Values)
}
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
err = toStorageErr(err)
return nil, err
}
// S3PeerSys - S3 peer call system.
type S3PeerSys struct {
peerClients []peerS3Client // Excludes self
poolsCount int
}
// NewS3PeerSys - creates new S3 peer calls.
func NewS3PeerSys(endpoints EndpointServerPools) *S3PeerSys {
return &S3PeerSys{
peerClients: newPeerS3Clients(endpoints.GetNodes()),
poolsCount: len(endpoints),
}
}
// ListBuckets lists buckets across all servers and returns a possibly consistent view
func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) (result []BucketInfo, err error) {
g := errgroup.WithNErrs(len(sys.peerClients))
nodeBuckets := make([][]BucketInfo, len(sys.peerClients))
errs := []error{nil}
for idx, client := range sys.peerClients {
idx := idx
client := client
g.Go(func() error {
if client == nil {
return errPeerOffline
}
localBuckets, err := client.ListBuckets(ctx, opts)
if err != nil {
return err
}
nodeBuckets[idx] = localBuckets
return nil
}, idx)
}
errs = append(errs, g.Wait()...)
quorum := len(sys.peerClients)/2 + 1
if err = reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, quorum); err != nil {
return nil, err
}
bucketsMap := make(map[string]struct{})
for idx, buckets := range nodeBuckets {
if errs[idx] != nil {
continue
}
for _, bi := range buckets {
_, ok := bucketsMap[bi.Name]
if !ok {
bucketsMap[bi.Name] = struct{}{}
result = append(result, bi)
}
}
}
return result, nil
}
// GetBucketInfo returns bucket stat info about bucket on disk across all peers
func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (binfo BucketInfo, err error) {
g := errgroup.WithNErrs(len(sys.peerClients))
bucketInfos := make([]BucketInfo, len(sys.peerClients))
for idx, client := range sys.peerClients {
idx := idx
client := client
g.Go(func() error {
if client == nil {
return errPeerOffline
}
bucketInfo, err := client.GetBucketInfo(ctx, bucket, opts)
if err != nil {
return err
}
bucketInfos[idx] = bucketInfo
return nil
}, idx)
}
errs := g.Wait()
quorum := len(sys.peerClients)/2 + 1
if err = reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, quorum); err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
}
for i, err := range errs {
if err == nil {
return bucketInfos[i], nil
}
}
return BucketInfo{}, toObjectErr(errVolumeNotFound, bucket)
}
func (client *remotePeerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
v := url.Values{}
v.Set(peerS3BucketDeleted, strconv.FormatBool(opts.Deleted))
respBody, err := client.call(peerS3MethodListBuckets, v, nil, -1)
if err != nil {
return nil, err
}
defer xhttp.DrainBody(respBody)
var buckets []BucketInfo
err = gob.NewDecoder(respBody).Decode(&buckets)
return buckets, err
}
// GetBucketInfo returns bucket stat info from a peer
func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
v := url.Values{}
v.Set(peerS3Bucket, bucket)
v.Set(peerS3BucketDeleted, strconv.FormatBool(opts.Deleted))
respBody, err := client.call(peerS3MethodGetBucketInfo, v, nil, -1)
if err != nil {
return BucketInfo{}, err
}
defer xhttp.DrainBody(respBody)
var bucketInfo BucketInfo
err = gob.NewDecoder(respBody).Decode(&bucketInfo)
return bucketInfo, err
}
// MakeBucket creates bucket across all peers
func (sys *S3PeerSys) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error {
g := errgroup.WithNErrs(len(sys.peerClients))
for idx, client := range sys.peerClients {
client := client
g.Go(func() error {
if client == nil {
return errPeerOffline
}
return client.MakeBucket(ctx, bucket, opts)
}, idx)
}
errs := g.Wait()
for poolIdx := 0; poolIdx < sys.poolsCount; poolIdx++ {
perPoolErrs := make([]error, 0, len(sys.peerClients))
for i, client := range sys.peerClients {
if slices.Contains(client.GetPools(), poolIdx) {
perPoolErrs = append(perPoolErrs, errs[i])
}
}
if poolErr := reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1); poolErr != nil {
return toObjectErr(poolErr, bucket)
}
}
return nil
}
// MakeBucket creates a bucket on a peer
func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error {
v := url.Values{}
v.Set(peerS3Bucket, bucket)
v.Set(peerS3BucketForceCreate, strconv.FormatBool(opts.ForceCreate))
respBody, err := client.call(peerS3MethodMakeBucket, v, nil, -1)
if err != nil {
return err
}
defer xhttp.DrainBody(respBody)
return nil
}
// DeleteBucket deletes bucket across all peers
func (sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
g := errgroup.WithNErrs(len(sys.peerClients))
for idx, client := range sys.peerClients {
client := client
g.Go(func() error {
if client == nil {
return errPeerOffline
}
return client.DeleteBucket(ctx, bucket, opts)
}, idx)
}
errs := g.Wait()
for poolIdx := 0; poolIdx < sys.poolsCount; poolIdx++ {
perPoolErrs := make([]error, 0, len(sys.peerClients))
for i, client := range sys.peerClients {
if slices.Contains(client.GetPools(), poolIdx) {
perPoolErrs = append(perPoolErrs, errs[i])
}
}
if poolErr := reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1); poolErr != nil {
// re-create the buckets that were successfully deleted, since we are returning an error.
sys.MakeBucket(ctx, bucket, MakeBucketOptions{})
return toObjectErr(poolErr, bucket)
}
}
return nil
}
// DeleteBucket deletes bucket on a peer
func (client *remotePeerS3Client) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
v := url.Values{}
v.Set(peerS3Bucket, bucket)
v.Set(peerS3BucketForceDelete, strconv.FormatBool(opts.Force))
respBody, err := client.call(peerS3MethodDeleteBucket, v, nil, -1)
if err != nil {
return err
}
defer xhttp.DrainBody(respBody)
return nil
}
func (client remotePeerS3Client) GetHost() string {
return client.host
}
func (client remotePeerS3Client) GetPools() []int {
return client.pools
}
func (client *remotePeerS3Client) SetPools(p []int) {
client.pools = make([]int, len(p))
copy(client.pools, p)
}
// newPeerS3Clients creates new peer clients.
func newPeerS3Clients(nodes []Node) (peers []peerS3Client) {
peers = make([]peerS3Client, len(nodes))
for i, node := range nodes {
if node.IsLocal {
peers[i] = &localPeerS3Client{host: node.Host}
} else {
peers[i] = newPeerS3Client(node.Host)
}
peers[i].SetPools(node.Pools)
}
return
}
// Returns a peer S3 client.
func newPeerS3Client(peer string) peerS3Client {
scheme := "http"
if globalIsTLS {
scheme = "https"
}
serverURL := &url.URL{
Scheme: scheme,
Host: peer,
Path: peerS3Path,
}
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
// Use a separate client to avoid recursive calls.
healthClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
healthClient.NoMetrics = true
// Construct a new health function.
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(context.Background(), restClient.HealthCheckTimeout)
defer cancel()
respBody, err := healthClient.Call(ctx, peerS3MethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
return !isNetworkError(err)
}
return &remotePeerS3Client{host: peer, restClient: restClient}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"github.com/zeebo/xxh3"
)
func getFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) {
fivs, err := getAllFileInfoVersions(xlMetaBuf, volume, path)
if err != nil {
return fivs, err
}
n := 0
for _, fi := range fivs.Versions {
// Filter out tier object free versions
if !fi.TierFreeVersion() {
fivs.Versions[n] = fi
n++
} else {
fivs.FreeVersions = append(fivs.FreeVersions, fi)
}
}
fivs.Versions = fivs.Versions[:n]
// Update numversions
for i := range fivs.Versions {
fivs.Versions[i].NumVersions = n
}
return fivs, nil
}
func getAllFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) {
var versions []FileInfo
var err error
if buf, _, e := isIndexedMetaV2(xlMetaBuf); e != nil {
return FileInfoVersions{}, e
} else if buf != nil {
versions, err = buf.ListVersions(volume, path)
} else {
var xlMeta xlMetaV2
if err := xlMeta.LoadOrConvert(xlMetaBuf); err != nil {
return FileInfoVersions{}, err
}
versions, err = xlMeta.ListVersions(volume, path)
}
if err == nil && len(versions) == 0 {
// This special case is needed to handle len(xlMeta.versions) == 0
versions = []FileInfo{
{
Volume: volume,
Name: path,
Deleted: true,
IsLatest: true,
ModTime: timeSentinel1970,
},
}
}
if err != nil {
return FileInfoVersions{}, err
}
return FileInfoVersions{
Volume: volume,
Name: path,
Versions: versions,
LatestModTime: versions[0].ModTime,
}, nil
}
func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data bool) (FileInfo, error) {
var fi FileInfo
var err error
var inData xlMetaInlineData
if buf, data, e := isIndexedMetaV2(xlMetaBuf); e != nil {
return FileInfo{}, e
} else if buf != nil {
inData = data
fi, err = buf.ToFileInfo(volume, path, versionID)
if len(buf) != 0 && errors.Is(err, errFileNotFound) {
// This special case is needed to handle len(xlMeta.versions) == 0
return FileInfo{
Volume: volume,
Name: path,
VersionID: versionID,
Deleted: true,
IsLatest: true,
ModTime: timeSentinel1970,
}, nil
}
} else {
var xlMeta xlMetaV2
if err := xlMeta.LoadOrConvert(xlMetaBuf); err != nil {
return FileInfo{}, err
}
if len(xlMeta.versions) == 0 {
// This special case is needed to handle len(xlMeta.versions) == 0
return FileInfo{
Volume: volume,
Name: path,
VersionID: versionID,
Deleted: true,
IsLatest: true,
ModTime: timeSentinel1970,
}, nil
}
inData = xlMeta.data
fi, err = xlMeta.ToFileInfo(volume, path, versionID, false)
}
if !data || err != nil {
return fi, err
}
versionID = fi.VersionID
if versionID == "" {
versionID = nullVersionID
}
fi.Data = inData.find(versionID)
if len(fi.Data) == 0 {
// PR #11758 used DataDir, preserve it
// for users who might have used master
// branch
fi.Data = inData.find(fi.DataDir)
}
return fi, nil
}
// getXLDiskLoc will return the pool/set/disk id if it can be located in the object layer.
// Will return -1 for unknown values.
func getXLDiskLoc(diskID string) (poolIdx, setIdx, diskIdx int) {
if api := newObjectLayerFn(); api != nil {
if globalIsErasureSD {
return 0, 0, 0
}
if ep, ok := api.(*erasureServerPools); ok {
if pool, set, disk, err := ep.getPoolAndSet(diskID); err == nil {
return pool, set, disk
}
}
}
return -1, -1, -1
}
// hashDeterministicString will return a deterministic hash for the map keys and values.
// Trivial collisions are avoided, but this is by no means a strong hash.
func hashDeterministicString(m map[string]string) uint64 {
// Seed (random)
crc := uint64(0xc2b40bbac11a7295)
// Xor each value to make order independent
for k, v := range m {
// Separate key and value with an individual xor with a random number.
// Add values of each, so they cannot be trivially collided.
crc ^= (xxh3.HashString(k) ^ 0x4ee3bbaf7ab2506b) + (xxh3.HashString(v) ^ 0x8da4c8da66194257)
}
return crc
}
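// Illustrative property check (not part of the original source): because each
// key/value pair is folded in with XOR and addition, the result does not depend
// on map iteration order.
//
//	a := hashDeterministicString(map[string]string{"x": "1", "y": "2"})
//	b := hashDeterministicString(map[string]string{"y": "2", "x": "1"})
//	// a == b is expected to hold
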
// hashDeterministicBytes will return a deterministic (weak) hash for the map keys and values.
// Trivial collisions are avoided, but this is by no means a strong hash.
func hashDeterministicBytes(m map[string][]byte) uint64 {
crc := uint64(0x1bbc7e1dde654743)
for k, v := range m {
crc ^= (xxh3.HashString(k) ^ 0x4ee3bbaf7ab2506b) + (xxh3.Hash(v) ^ 0x8da4c8da66194257)
}
return crc
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
cr "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/set"
)
func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) {
suite.SetUpSuite(c)
// The STS for root test needs to be the first one after setup.
suite.TestSTSForRoot(c)
suite.TestSTS(c)
suite.TestSTSWithDenyDeleteVersion(c)
suite.TestSTSWithTags(c)
suite.TestSTSServiceAccountsWithUsername(c)
suite.TestSTSWithGroupPolicy(c)
suite.TearDownSuite(c)
}
func TestIAMInternalIDPSTSServerSuite(t *testing.T) {
baseTestCases := []TestSuiteCommon{
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMSTSTests(testCase, &check{t, testCase.serverType})
},
)
}
}
func (s *TestSuiteIAM) TestSTSServiceAccountsWithUsername(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := "dillon-bucket"
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Create policy
policy := "mypolicy-username"
policyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::${aws:username}-*"
]
}
]
}`)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
if err = s.adm.AddUser(ctx, "dillon", "<PASSWORD>"); err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, "dillon", false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: "dillon",
SecretKey: "dillon-123",
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Check that the LDAP sts cred is actually working.
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
// 1. Check S3 access for service account ListObjects()
c.mustListObjects(ctx, svcClient, bucket)
// 2. Check S3 access for upload
c.mustUpload(ctx, svcClient, bucket)
// 3. Check S3 access for download
c.mustDownload(ctx, svcClient, bucket)
}
func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{ObjectLocking: true})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ObjectActionsRW",
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:PutObjectTagging",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:GetObject",
"s3:GetObjectTagging",
"s3:GetObjectVersion",
"s3:ListMultipartUploadParts"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
},
{
"Sid": "DenyDeleteVersionAction",
"Effect": "Deny",
"Action": [
"s3:DeleteObjectVersion"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}
`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// confirm that the user is able to access the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
versions := c.mustUploadReturnVersions(ctx, uClient, bucket)
c.mustNotDelete(ctx, uClient, bucket, versions[0])
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: accessKey,
SecretKey: secretKey,
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("err calling assumeRole: %v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
versions = c.mustUploadReturnVersions(ctx, minioClient, bucket)
c.mustNotDelete(ctx, minioClient, bucket, versions[0])
}
func (s *TestSuiteIAM) TestSTSWithTags(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
object := getRandomObjectName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*",
"Condition": { "StringEquals": {"s3:ExistingObjectTag/security": "public" } }
},
{
"Effect": "Allow",
"Action": "s3:DeleteObjectTagging",
"Resource": "arn:aws:s3:::%s/*",
"Condition": { "StringEquals": {"s3:ExistingObjectTag/security": "public" } }
},
{
"Effect": "Allow",
"Action": "s3:DeleteObject",
"Resource": "arn:aws:s3:::%s/*"
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
],
"Condition": {
"ForAllValues:StringLike": {
"s3:RequestObjectTagKeys": [
"security",
"virus"
]
}
}
}
]
}`, bucket, bucket, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// confirm that the user is able to access the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustPutObjectWithTags(ctx, uClient, bucket, object)
c.mustGetObject(ctx, uClient, bucket, object)
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: accessKey,
SecretKey: secretKey,
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("err calling assumeRole: %v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate sts creds can access the object
c.mustPutObjectWithTags(ctx, minioClient, bucket, object)
c.mustGetObject(ctx, minioClient, bucket, object)
c.mustHeadObject(ctx, minioClient, bucket, object, 2)
// Validate that the client can remove objects
if err = minioClient.RemoveObjectTagging(ctx, bucket, object, minio.RemoveObjectTaggingOptions{}); err != nil {
c.Fatalf("user is unable to delete the object tags: %v", err)
}
if err = minioClient.RemoveObject(ctx, bucket, object, minio.RemoveObjectOptions{}); err != nil {
c.Fatalf("user is unable to delete the object: %v", err)
}
}
func (s *TestSuiteIAM) TestSTS(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// confirm that the user is able to access the bucket
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustListObjects(ctx, uClient, bucket)
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: accessKey,
SecretKey: secretKey,
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("err calling assumeRole: %v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
if err.Error() != "Access Denied." {
c.Fatalf("unexpected non-access-denied err: %v", err)
}
}
func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
// confirm that the user is unable to access the bucket - we have not
// yet set any policy
uClient := s.getUserClient(c, accessKey, secretKey, "")
c.mustNotListObjects(ctx, uClient, bucket)
err = s.adm.UpdateGroupMembers(ctx, madmin.GroupAddRemove{
Group: "test-group",
Members: []string{accessKey},
})
if err != nil {
c.Fatalf("unable to add user to group: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, "test-group", true)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
// confirm that the user is able to access the bucket - permission comes
// from group.
c.mustListObjects(ctx, uClient, bucket)
// Create STS user.
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: accessKey,
SecretKey: secretKey,
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("err calling assumeRole: %v", err)
}
// Check that STS user client has access coming from parent user's
// group.
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
if err.Error() != "Access Denied." {
c.Fatalf("unexpected non-access-denied err: %v", err)
}
}
// TestSTSForRoot - needs to be the first test after server setup due to the
// buckets list check.
func (s *TestSuiteIAM) TestSTSForRoot(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
assumeRole := cr.STSAssumeRole{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
Options: cr.STSAssumeRoleOptions{
AccessKey: globalActiveCred.AccessKey,
SecretKey: globalActiveCred.SecretKey,
Location: "",
},
}
value, err := assumeRole.Retrieve()
if err != nil {
c.Fatalf("err calling assumeRole: %v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that a bucket can be created
bucket2 := getRandomBucketName()
err = minioClient.MakeBucket(ctx, bucket2, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
// Validate that admin APIs can be called - create an madmin client with
// user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
accInfo, err := userAdmClient.AccountInfo(ctx, madmin.AccountOpts{})
if err != nil {
c.Fatalf("root user STS should be able to get account info: %v", err)
}
gotBuckets := set.NewStringSet()
for _, b := range accInfo.Buckets {
gotBuckets.Add(b.Name)
if !(b.Access.Read && b.Access.Write) {
c.Fatalf("root user should have read and write access to bucket: %v", b.Name)
}
}
shouldHaveBuckets := set.CreateStringSet(bucket2, bucket)
if !gotBuckets.Equals(shouldHaveBuckets) {
c.Fatalf("root user should have access to all buckets")
}
// This must fail.
if err := userAdmClient.AddUser(ctx, globalActiveCred.AccessKey, globalActiveCred.SecretKey); err == nil {
c.Fatal("AddUser() for root credential must fail via root STS creds")
}
}
// SetUpLDAP - expects to setup an LDAP test server using the test LDAP
// container and canned data from https://github.com/minio/minio-ldap-testing
func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
configCmds := []string{
"identity_ldap",
fmt.Sprintf("server_addr=%s", serverAddr),
"server_insecure=on",
"lookup_bind_dn=cn=admin,dc=min,dc=io",
"lookup_bind_password=<PASSWORD>",
"user_dn_search_base_dn=dc=min,dc=io",
"user_dn_search_filter=(uid=%s)",
"group_search_base_dn=ou=swengg,dc=min,dc=io",
"group_search_filter=(&(objectclass=groupofnames)(member=%d))",
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup LDAP for tests: %v", err)
}
s.RestartIAMSuite(c)
}
const (
EnvTestLDAPServer = "LDAP_TEST_SERVER"
)
func TestIAMWithLDAPServerSuite(t *testing.T) {
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
ldapServer := os.Getenv(EnvTestLDAPServer)
if ldapServer == "" {
c.Skip("Skipping LDAP test as no LDAP server is provided.")
}
suite.SetUpSuite(c)
suite.SetUpLDAP(c, ldapServer)
suite.TestLDAPSTS(c)
suite.TestLDAPSTSServiceAccounts(c)
suite.TestLDAPSTSServiceAccountsWithUsername(c)
suite.TestLDAPSTSServiceAccountsWithGroups(c)
suite.TearDownSuite(c)
},
)
}
}
func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Create policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
ldapID := cr.LDAPIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
LDAPUsername: "dillon",
LDAPPassword: "<PASSWORD>",
}
_, err = ldapID.Retrieve()
if err == nil {
c.Fatalf("Expected to fail to create STS cred with no associated policy!")
}
// Attempting to set a non-existent policy should fail.
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy+"x", userDN, false)
if err == nil {
c.Fatalf("should not be able to set non-existent policy")
}
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
value, err := ldapID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that user listing returns the single LDAP user with a mapped policy
usersList, err := s.adm.ListUsers(ctx)
if err != nil {
c.Fatalf("list users should not fail: %v", err)
}
if len(usersList) != 1 {
c.Fatalf("expected user listing output: %v", usersList)
}
uinfo := usersList[userDN]
if uinfo.PolicyName != policy || uinfo.Status != madmin.AccountEnabled {
c.Fatalf("expected user listing content: %v", uinfo)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
if err.Error() != "Access Denied." {
c.Fatalf("unexpected non-access-denied err: %v", err)
}
// Remove the policy assignment on the user DN:
err = s.adm.SetPolicy(ctx, "", userDN, false)
if err != nil {
c.Fatalf("Unable to remove policy setting: %v", err)
}
_, err = ldapID.Retrieve()
if err == nil {
c.Fatalf("Expected to fail to create a user with no associated policy!")
}
// Set policy via group and validate policy assignment.
groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, groupDN, true)
if err != nil {
c.Fatalf("Unable to set group policy: %v", err)
}
value, err = ldapID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
minioClient, err = minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
c.Assert(err.Error(), "Access Denied.")
}
func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Create policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
ldapID := cr.LDAPIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
LDAPUsername: "dillon",
LDAPPassword: "<PASSWORD>",
}
value, err := ldapID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Check that the LDAP sts cred is actually working.
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey, true)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 6. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := "dillon"
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Create policy
policy := "mypolicy-username"
policyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::${ldap:username}/*"
]
}
]
}`)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
ldapID := cr.LDAPIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
LDAPUsername: "dillon",
LDAPPassword: "<PASSWORD>",
}
value, err := ldapID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Check that the LDAP sts cred is actually working.
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
// 1. Check S3 access for service account ListObjects()
c.mustListObjects(ctx, svcClient, bucket)
// 2. Check S3 access for upload
c.mustUpload(ctx, svcClient, bucket)
// 3. Check S3 access for download
c.mustDownload(ctx, svcClient, bucket)
}
// In this test, the parent user gets their permissions from a group, rather
// than having a policy set directly on them.
func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Create policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
groupDN := "cn=projecta,ou=groups,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, groupDN, true)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
ldapID := cr.LDAPIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
LDAPUsername: "dillon",
LDAPPassword: "<PASSWORD>",
}
value, err := ldapID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Check that the LDAP sts cred is actually working.
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey, true)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 5. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 6. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 7. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
func (s *TestSuiteIAM) TestOpenIDSTS(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity STS token by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, testAppParams, "<EMAIL>", "dillon")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
// fmt.Printf("TOKEN: %s\n", token)
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
}
// Create policy - with name as one of the groups in OpenID the user is
// a member of.
policy := "projecta"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
if err == nil || err.Error() != "Access Denied." {
c.Fatalf("unexpected non-access-denied err: %v", err)
}
}
func (s *TestSuiteIAM) TestOpenIDSTSAddUser(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity STS token by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, testAppParams, "<EMAIL>", "dillon")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
}
// Create policy - with name as one of the groups in OpenID the user is
// a member of.
policy := "projecta"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
c.mustNotCreateIAMUser(ctx, userAdmClient)
// Create admin user policy.
policyBytes = []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"admin:*"
]
}
]
}`)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
cr := c.mustCreateIAMUser(ctx, userAdmClient)
userInfo := c.mustGetIAMUserInfo(ctx, userAdmClient, cr.AccessKey)
c.Assert(userInfo.Status, madmin.AccountEnabled)
}
func (s *TestSuiteIAM) TestOpenIDServiceAcc(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity STS token by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, testAppParams, "<EMAIL>", "<PASSWORD>")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
}
// Create policy - with name as one of the groups in OpenID the user is
// a member of.
policy := "projecta"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey, true)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 5. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 6. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 7. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
}
var testAppParams = OpenIDClientAppParams{
ClientID: "minio-client-app",
ClientSecret: "minio-client-app-secret",
ProviderURL: "http://127.0.0.1:5556/dex",
RedirectURL: "http://127.0.0.1:10000/oauth_callback",
}
const (
EnvTestOpenIDServer = "OPENID_TEST_SERVER"
EnvTestOpenIDServer2 = "OPENID_TEST_SERVER_2"
)
// SetUpOpenIDs - sets up one or more OpenID test servers using the test OpenID
// container and canned data from https://github.com/minio/minio-ldap-testing
//
// Each set of client app params corresponds to a separate OpenID server, and
// the i-th server in this list will be assigned the i-th policy in `rolePolicies`. If
// a rolePolicies entry is an empty string, that server will be configured as a
// policy-claim-based OpenID server. NOTE that a valid configuration can have a
// policy-claim-based provider only if it is the only OpenID provider.
func (s *TestSuiteIAM) SetUpOpenIDs(c *check, testApps []OpenIDClientAppParams, rolePolicies []string) error {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
for i, testApp := range testApps {
configCmds := []string{
fmt.Sprintf("identity_openid:%d", i),
fmt.Sprintf("config_url=%s/.well-known/openid-configuration", testApp.ProviderURL),
fmt.Sprintf("client_id=%s", testApp.ClientID),
fmt.Sprintf("client_secret=%s", testApp.ClientSecret),
"scopes=openid,groups",
fmt.Sprintf("redirect_uri=%s", testApp.RedirectURL),
}
if rolePolicies[i] != "" {
configCmds = append(configCmds, fmt.Sprintf("role_policy=%s", rolePolicies[i]))
} else {
configCmds = append(configCmds, "claim_name=groups")
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
return fmt.Errorf("unable to setup OpenID for tests: %v", err)
}
}
s.RestartIAMSuite(c)
return nil
}
// SetUpOpenID - expects to setup an OpenID test server using the test OpenID
// container and canned data from https://github.com/minio/minio-ldap-testing
func (s *TestSuiteIAM) SetUpOpenID(c *check, serverAddr string, rolePolicy string) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
configCmds := []string{
"identity_openid",
fmt.Sprintf("config_url=%s/.well-known/openid-configuration", serverAddr),
"client_id=minio-client-app",
"client_secret=minio-client-app-secret",
"scopes=openid,groups",
"redirect_uri=http://127.0.0.1:10000/oauth_callback",
}
if rolePolicy != "" {
configCmds = append(configCmds, fmt.Sprintf("role_policy=%s", rolePolicy))
} else {
configCmds = append(configCmds, "claim_name=groups")
}
_, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " "))
if err != nil {
c.Fatalf("unable to setup OpenID for tests: %v", err)
}
s.RestartIAMSuite(c)
}
func TestIAMWithOpenIDServerSuite(t *testing.T) {
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
openIDServer := os.Getenv(EnvTestOpenIDServer)
if openIDServer == "" {
c.Skip("Skipping OpenID test as no OpenID server is provided.")
}
suite.SetUpSuite(c)
suite.SetUpOpenID(c, openIDServer, "")
suite.TestOpenIDSTS(c)
suite.TestOpenIDServiceAcc(c)
suite.TestOpenIDSTSAddUser(c)
suite.TearDownSuite(c)
},
)
}
}
func TestIAMWithOpenIDWithRolePolicyServerSuite(t *testing.T) {
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
openIDServer := os.Getenv(EnvTestOpenIDServer)
if openIDServer == "" {
c.Skip("Skipping OpenID test as no OpenID server is provided.")
}
suite.SetUpSuite(c)
suite.SetUpOpenID(c, openIDServer, "readwrite")
suite.TestOpenIDSTSWithRolePolicy(c, testRoleARNs[0], testRoleMap[testRoleARNs[0]])
suite.TestOpenIDServiceAccWithRolePolicy(c)
suite.TearDownSuite(c)
},
)
}
}
func TestIAMWithOpenIDWithRolePolicyWithPolicyVariablesServerSuite(t *testing.T) {
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
openIDServer := os.Getenv(EnvTestOpenIDServer)
if openIDServer == "" {
c.Skip("Skipping OpenID test as no OpenID server is provided.")
}
suite.SetUpSuite(c)
suite.SetUpOpenID(c, openIDServer, "projecta,projectb,projectaorb")
suite.TestOpenIDSTSWithRolePolicyWithPolVar(c, testRoleARNs[0], testRoleMap[testRoleARNs[0]])
suite.TearDownSuite(c)
},
)
}
}
const (
testRoleARN = "arn:minio:iam:::role/nOybJqMNzNmroqEKq5D0EUsRZw0"
testRoleARN2 = "arn:minio:iam:::role/domXb70kze7Ugc1SaxaeFchhLP4"
)
var (
testRoleARNs = []string{testRoleARN, testRoleARN2}
// Load test client app and test role mapping depending on test
// environment.
testClientApps, testRoleMap = func() ([]OpenIDClientAppParams, map[string]OpenIDClientAppParams) {
var apps []OpenIDClientAppParams
m := map[string]OpenIDClientAppParams{}
openIDServer := os.Getenv(EnvTestOpenIDServer)
if openIDServer != "" {
apps = append(apps, OpenIDClientAppParams{
ClientID: "minio-client-app",
ClientSecret: "minio-client-app-secret",
ProviderURL: openIDServer,
RedirectURL: "http://127.0.0.1:10000/oauth_callback",
})
m[testRoleARNs[len(apps)-1]] = apps[len(apps)-1]
}
openIDServer2 := os.Getenv(EnvTestOpenIDServer2)
if openIDServer2 != "" {
apps = append(apps, OpenIDClientAppParams{
ClientID: "minio-client-app-2",
ClientSecret: "minio-client-app-secret-2",
ProviderURL: openIDServer2,
RedirectURL: "http://127.0.0.1:10000/oauth_callback",
})
m[testRoleARNs[len(apps)-1]] = apps[len(apps)-1]
}
return apps, m
}()
)
func (s *TestSuiteIAM) TestOpenIDSTSWithRolePolicy(c *check, roleARN string, clientApp OpenIDClientAppParams) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity JWT by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, clientApp, "<EMAIL>", "dillon")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
// Generate STS credential.
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
RoleARN: roleARN,
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// fmt.Printf("value: %#v\n", value)
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
}
func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicy(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity STS token by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, testAppParams, "<EMAIL>", "dillon")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
RoleARN: testRoleARN,
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey, true)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 4. Check that svc account can restrict the policy, and that the
// session policy can be updated.
c.assertSvcAccSessionPolicyUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 5. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 6. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, value.AccessKeyID, bucket)
}
// Constants for Policy Variables test.
var (
policyProjectA = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetBucketLocation",
"s3:ListAllMyBuckets"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::projecta",
"arn:aws:s3:::projecta/*"
],
"Condition": {
"ForAnyValue:StringEquals": {
"jwt:groups": [
"projecta"
]
}
}
}
]
}
`
policyProjectB = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetBucketLocation",
"s3:ListAllMyBuckets"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::projectb",
"arn:aws:s3:::projectb/*"
],
"Condition": {
"ForAnyValue:StringEquals": {
"jwt:groups": [
"projectb"
]
}
}
}
]
}
`
policyProjectAorB = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetBucketLocation",
"s3:ListAllMyBuckets"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::projectaorb",
"arn:aws:s3:::projectaorb/*"
],
"Condition": {
"ForAnyValue:StringEquals": {
"jwt:groups": [
"projecta",
"projectb"
]
}
}
}
]
}`
policyProjectsMap = map[string]string{
// grants access to bucket `projecta` if user is in group `projecta`
"projecta": policyProjectA,
// grants access to bucket `projectb` if user is in group `projectb`
"projectb": policyProjectB,
// grants access to bucket `projectaorb` if user is in either group
// `projecta` or `projectb`
"projectaorb": policyProjectAorB,
}
)
func (s *TestSuiteIAM) TestOpenIDSTSWithRolePolicyWithPolVar(c *check, roleARN string, clientApp OpenIDClientAppParams) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Create project buckets
buckets := []string{"projecta", "projectb", "projectaorb", "other"}
for _, bucket := range buckets {
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
}
// Create policies
for polName, polContent := range policyProjectsMap {
err := s.adm.AddCannedPolicy(ctx, polName, []byte(polContent))
if err != nil {
c.Fatalf("policy add error: %v", err)
}
}
makeSTSClient := func(user, password string) *minio.Client {
// Generate web identity JWT by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, clientApp, user, password)
if err != nil {
c.Fatalf("mock user err: %v", err)
}
// Generate STS credential.
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
RoleARN: roleARN,
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// fmt.Printf("value: %#v\n", value)
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
return minioClient
}
// user dillon's groups attribute is ["projecta", "projectb"]
dillonClient := makeSTSClient("<EMAIL>", "dillon")
// Validate client's permissions
c.mustListBuckets(ctx, dillonClient)
c.mustListObjects(ctx, dillonClient, "projecta")
c.mustListObjects(ctx, dillonClient, "projectb")
c.mustListObjects(ctx, dillonClient, "projectaorb")
c.mustNotListObjects(ctx, dillonClient, "other")
// this user's groups attribute is ["projectb"]
lisaClient := makeSTSClient("<EMAIL>", "liza")
// Validate client's permissions
c.mustListBuckets(ctx, lisaClient)
c.mustNotListObjects(ctx, lisaClient, "projecta")
c.mustListObjects(ctx, lisaClient, "projectb")
c.mustListObjects(ctx, lisaClient, "projectaorb")
c.mustNotListObjects(ctx, lisaClient, "other")
}
func TestIAMWithOpenIDMultipleConfigsValidation1(t *testing.T) {
openIDServer := os.Getenv(EnvTestOpenIDServer)
openIDServer2 := os.Getenv(EnvTestOpenIDServer2)
if openIDServer == "" || openIDServer2 == "" {
t.Skip("Skipping OpenID test as enough OpenID servers are not provided.")
}
testApps := testClientApps
rolePolicies := []string{
"", // Treated as claim-based provider as no role policy is given.
"readwrite",
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
suite.SetUpSuite(c)
defer suite.TearDownSuite(c)
err := suite.SetUpOpenIDs(c, testApps, rolePolicies)
if err != nil {
c.Fatalf("config with 1 claim based and 1 role based provider should pass but got: %v", err)
}
},
)
}
}
func TestIAMWithOpenIDMultipleConfigsValidation2(t *testing.T) {
openIDServer := os.Getenv(EnvTestOpenIDServer)
openIDServer2 := os.Getenv(EnvTestOpenIDServer2)
if openIDServer == "" || openIDServer2 == "" {
t.Skip("Skipping OpenID test as enough OpenID servers are not provided.")
}
testApps := testClientApps
rolePolicies := []string{
"", // Treated as claim-based provider as no role policy is given.
"", // Treated as claim-based provider as no role policy is given.
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
suite.SetUpSuite(c)
defer suite.TearDownSuite(c)
err := suite.SetUpOpenIDs(c, testApps, rolePolicies)
if err == nil {
c.Fatalf("config with 2 claim based provider should fail")
}
},
)
}
}
func TestIAMWithOpenIDWithMultipleRolesServerSuite(t *testing.T) {
openIDServer := os.Getenv(EnvTestOpenIDServer)
openIDServer2 := os.Getenv(EnvTestOpenIDServer2)
if openIDServer == "" || openIDServer2 == "" {
t.Skip("Skipping OpenID test as enough OpenID servers are not provided.")
}
testApps := testClientApps
rolePolicies := []string{
"consoleAdmin",
"readwrite",
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
suite.SetUpSuite(c)
err := suite.SetUpOpenIDs(c, testApps, rolePolicies)
if err != nil {
c.Fatalf("Error setting up openid providers for tests: %v", err)
}
suite.TestOpenIDSTSWithRolePolicy(c, testRoleARNs[0], testRoleMap[testRoleARNs[0]])
suite.TestOpenIDSTSWithRolePolicy(c, testRoleARNs[1], testRoleMap[testRoleARNs[1]])
suite.TestOpenIDServiceAccWithRolePolicy(c)
suite.TearDownSuite(c)
},
)
}
}
// Access Management Plugin tests
func TestIAM_AMPWithOpenIDWithMultipleRolesServerSuite(t *testing.T) {
openIDServer := os.Getenv(EnvTestOpenIDServer)
openIDServer2 := os.Getenv(EnvTestOpenIDServer2)
if openIDServer == "" || openIDServer2 == "" {
t.Skip("Skipping OpenID test as enough OpenID servers are not provided.")
}
testApps := testClientApps
rolePolicies := []string{
"consoleAdmin",
"readwrite",
}
for i, testCase := range iamTestSuites {
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription),
func(t *testing.T) {
c := &check{t, testCase.serverType}
suite := testCase
suite.SetUpSuite(c)
defer suite.TearDownSuite(c)
err := suite.SetUpOpenIDs(c, testApps, rolePolicies)
if err != nil {
c.Fatalf("Error setting up openid providers for tests: %v", err)
}
suite.SetUpAccMgmtPlugin(c)
suite.TestOpenIDSTSWithRolePolicyUnderAMP(c, testRoleARNs[0], testRoleMap[testRoleARNs[0]])
suite.TestOpenIDSTSWithRolePolicyUnderAMP(c, testRoleARNs[1], testRoleMap[testRoleARNs[1]])
suite.TestOpenIDServiceAccWithRolePolicyUnderAMP(c)
},
)
}
}
func (s *TestSuiteIAM) TestOpenIDSTSWithRolePolicyUnderAMP(c *check, roleARN string, clientApp OpenIDClientAppParams) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity JWT by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, clientApp, "<EMAIL>", "<PASSWORD>")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
// Generate STS credential.
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
RoleARN: roleARN,
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// fmt.Printf("value: %#v\n", value)
minioClient, err := minio.New(s.endpoint, &minio.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
Transport: s.TestSuiteCommon.client.Transport,
})
if err != nil {
c.Fatalf("Error initializing client: %v", err)
}
// Validate that the client from sts creds can access the bucket.
c.mustListObjects(ctx, minioClient, bucket)
// Validate that the client from STS creds cannot upload any object as
// it is denied by the plugin.
c.mustNotUpload(ctx, minioClient, bucket)
}
func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicyUnderAMP(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
// Generate web identity STS token by interacting with OpenID IDP.
token, err := MockOpenIDTestUserInteraction(ctx, testAppParams, "<EMAIL>", "<PASSWORD>")
if err != nil {
c.Fatalf("mock user err: %v", err)
}
webID := cr.STSWebIdentity{
Client: s.TestSuiteCommon.client,
STSEndpoint: s.endPoint,
GetWebIDTokenExpiry: func() (*cr.WebIdentityToken, error) {
return &cr.WebIdentityToken{
Token: token,
}, nil
},
RoleARN: testRoleARN,
}
value, err := webID.Retrieve()
if err != nil {
c.Fatalf("Expected to generate STS creds, got err: %#v", err)
}
// Create an madmin client with user creds
userAdmClient, err := madmin.NewWithOptions(s.endpoint, &madmin.Options{
Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
Secure: s.secure,
})
if err != nil {
c.Fatalf("Err creating user admin client: %v", err)
}
userAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport)
// Create svc acc
cr := c.mustCreateSvcAccount(ctx, value.AccessKeyID, userAdmClient)
// 1. Check that svc account appears in listing
c.assertSvcAccAppearsInListing(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey)
// 2. Check that svc account info can be queried
c.assertSvcAccInfoQueryable(ctx, userAdmClient, value.AccessKeyID, cr.AccessKey, true)
// 3. Check S3 access
c.assertSvcAccS3Access(ctx, s, cr, bucket)
// 3.1 Validate that the client from STS creds cannot upload any object as
// it is denied by the plugin.
c.mustNotUpload(ctx, s.getUserClient(c, cr.AccessKey, cr.SecretKey, ""), bucket)
// Check that session policies do not apply - as policy enforcement is
// delegated to plugin.
{
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: value.AccessKeyID,
AccessKey: svcAK,
SecretKey: svcSK,
})
if err != nil {
c.Fatalf("Unable to create svc acc: %v", err)
}
svcClient := s.getUserClient(c, cr.AccessKey, cr.SecretKey, "")
// Though the attached policy does not allow listing, it will be
// ignored because the plugin allows it.
c.mustListObjects(ctx, svcClient, bucket)
}
// 4. Check that service account's secret key and account status can be
// updated.
c.assertSvcAccSecretKeyAndStatusUpdate(ctx, s, userAdmClient, value.AccessKeyID, bucket)
// 5. Check that service account can be deleted.
c.assertSvcAccDeletion(ctx, s, userAdmClient, value.AccessKeyID, bucket)
}
<file_sep>#!/bin/bash
helm package helm/minio -d helm-releases/
helm repo index --merge index.yaml --url https://charts.min.io .
<file_sep>#!/bin/bash -e
set -E
set -o pipefail
set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_5drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>
export MC_HOST_minio="http://minio:[email protected]:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
"${WORK_DIR}/mc" cp --quiet -r "buildscripts/cicd-corpus/" "${WORK_DIR}/cicd-corpus/"
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/cicd-corpus/disk{1...5}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${WORK_DIR}/mc" stat minio/bucket/testobj
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_5drive ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
<file_sep># AssumeRoleWithCustomToken [](https://slack.min.io)
## Introduction
To integrate with custom authentication methods using the [Identity Management Plugin](../iam/identity-management-plugin.md), MinIO provides an STS API extension called `AssumeRoleWithCustomToken`.
After configuring the plugin, use the generated Role ARN with `AssumeRoleWithCustomToken` to get temporary credentials to access object storage.
## API Request
To make an STS API request with this method, send a POST request to the MinIO endpoint with the following query parameters:
| Parameter | Type | Required | Description |
|-----------------|---------|----------|----------------------------------------------------------------------|
| Action | String | Yes | Value must be `AssumeRoleWithCustomToken` |
| Version | String | Yes | Value must be `2011-06-15` |
| Token | String | Yes | Token to be authenticated by identity plugin |
| RoleArn | String | Yes | Must match the Role ARN generated for the identity plugin |
| DurationSeconds | Integer | No | Duration of validity of generated credentials. Must be at least 900. |
The validity duration of the generated STS credentials is the minimum of the `DurationSeconds` parameter (if passed) and the validity duration returned by the Identity Management Plugin.
## API Response
The XML response for this API is similar to [AWS STS AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html#API_AssumeRoleWithWebIdentity_ResponseElements)
## Example request and response
Sample request with `curl`:
```sh
curl -XPOST 'http://localhost:9001/?Action=AssumeRoleWithCustomToken&Version=2011-06-15&Token=aaa&RoleArn=arn:minio:iam:::role/idmp-vGxBdLkOc8mQPU1-UQbBh-yWWVQ'
```
Prettified Response:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<AssumeRoleWithCustomTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<AssumeRoleWithCustomTokenResult>
<Credentials>
<AccessKeyId><KEY></AccessKeyId>
<SecretAccessKey><KEY></SecretAccessKey>
<Expiration>2022-05-25T19:56:34Z</Expiration>
<SessionToken><KEY></SessionToken>
</Credentials>
<AssumedUser>custom:Alice</AssumedUser>
</AssumeRoleWithCustomTokenResult>
<ResponseMetadata>
<RequestId>16F26E081E36DE63</RequestId>
</ResponseMetadata>
</AssumeRoleWithCustomTokenResponse>
```
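The same request can also be issued programmatically. Below is a minimal Go sketch using only the standard library; the endpoint, token and Role ARN values are taken from the `curl` example above and are placeholders to be replaced with values from your deployment.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Query parameters as documented in the API Request table above.
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithCustomToken")
	v.Set("Version", "2011-06-15")
	v.Set("Token", "aaa")                                                      // token to be verified by the identity plugin
	v.Set("RoleArn", "arn:minio:iam:::role/idmp-vGxBdLkOc8mQPU1-UQbBh-yWWVQ") // role ARN generated for the plugin
	v.Set("DurationSeconds", "3600")                                          // optional, must be at least 900

	resp, err := http.Post("http://localhost:9001/?"+v.Encode(), "application/x-www-form-urlencoded", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // XML AssumeRoleWithCustomTokenResponse as shown above
}
```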
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"time"
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/bucket/policy"
)
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.
// ----------
// Add a replication configuration on the specified bucket as specified in https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() && logger.GetReqInfo(ctx).Cred.AccessKey != globalActiveCred.AccessKey {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if versioned := globalBucketVersioningSys.Enabled(bucket); !versioned {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNeedsVersioningError), r.URL)
return
}
replicationConfig, err := replication.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true)
if apiErr != noError {
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
// Validate the received bucket replication config
if err = replicationConfig.Validate(bucket, sameTarget); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configData, err := xml.Marshal(replicationConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketReplicationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketReplicationConfigHandler - GET Bucket replication configuration.
// ----------
// Gets the replication configuration for a bucket.
func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseXML(w, configData)
}
// DeleteBucketReplicationConfigHandler - DELETE Bucket replication config.
// ----------
func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketReplicationConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationDenyEditError), r.URL)
return
}
if _, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketReplicationConfig); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
for _, tgt := range targets.Targets {
if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, tgt.Arn); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if _, err := globalBucketMetadataSys.Delete(ctx, bucket, bucketTargetsFile); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketReplicationMetricsHandler - GET Bucket replication metrics.
// ----------
// Gets the replication metrics for a bucket.
func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketReplicationMetrics")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// check if user has permissions to perform this operation
if s3Error := checkRequestAuthType(ctx, r, policy.GetReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
var usageInfo BucketUsageInfo
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil && !dataUsageInfo.LastUpdate.IsZero() {
usageInfo = dataUsageInfo.BucketsUsage[bucket]
}
w.Header().Set(xhttp.ContentType, string(mimeJSON))
enc := json.NewEncoder(w)
stats := globalReplicationStats.getLatestReplicationStats(bucket, usageInfo)
bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket)
bwMap := bwRpt.BucketStats[bucket]
for arn, st := range stats.Stats {
if bwMap != nil {
if bw, ok := bwMap[arn]; ok {
st.BandWidthLimitInBytesPerSecond = bw.LimitInBytesPerSecond
st.CurrentBandwidthInBytesPerSecond = bw.CurrentBandwidthInBytesPerSecond
stats.Stats[arn] = st
}
}
}
if err = enc.Encode(stats); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// ResetBucketReplicationStartHandler - starts a replication reset for all objects in a bucket which
// qualify for replication and re-syncs the object(s) to the target, provided ExistingObjectReplication is
// enabled for the qualifying rule. This API is a MinIO-only extension provided for situations where the
// remote target is entirely lost, and previously replicated objects need to be re-synced. If a resync is
// already in progress, it returns an error.
func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
durationStr := r.URL.Query().Get("older-than")
arn := r.URL.Query().Get("arn")
resetID := r.URL.Query().Get("reset-id")
if resetID == "" {
resetID = mustGetUUID()
}
var (
days time.Duration
err error
)
if durationStr != "" {
days, err = time.ParseDuration(durationStr)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("invalid query parameter older-than %s for %s : %w", durationStr, bucket, err),
}), r.URL)
return
}
}
resetBeforeDate := UTCNow().AddDate(0, 0, -1*int(days/24))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
hasARN, hasExistingObjEnabled := config.HasExistingObjectReplication(arn)
if !hasARN {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrRemoteTargetNotFoundError), r.URL)
return
}
if !hasExistingObjEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationNoExistingObjects), r.URL)
return
}
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn,
})
if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("Remote target ARN %s missing or ineligible for replication resync", arn),
}), r.URL)
return
}
if len(tgtArns) > 1 && arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("ARN should be specified for replication reset"),
}), r.URL)
return
}
var rinfo ResyncTargetsInfo
target := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, tgtArns[0])
target.ResetBeforeDate = UTCNow().AddDate(0, 0, -1*int(days/24))
target.ResetID = resetID
rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
switch err.(type) {
case RemoteTargetConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalReplicationPool.resyncer.start(ctx, objectAPI, resyncOpts{
bucket: bucket,
arn: arn,
resyncID: resetID,
resyncBefore: resetBeforeDate,
}); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: err,
}), r.URL)
return
}
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// ResetBucketReplicationStatusHandler - returns the status of replication reset.
// This API is a MinIO only extension
func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ResetBucketReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
arn := r.URL.Query().Get("arn")
var err error
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ResetBucketReplicationStateAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
var tgtStats map[string]TargetReplicationResyncStatus
globalReplicationPool.resyncer.RLock()
brs, ok := globalReplicationPool.resyncer.statusMap[bucket]
if ok {
tgtStats = brs.cloneTgtStats()
}
globalReplicationPool.resyncer.RUnlock()
if !ok {
brs, err = loadBucketResyncMetadata(ctx, bucket, objectAPI)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
Bucket: bucket,
Err: fmt.Errorf("No replication resync status available for %s", arn),
}), r.URL)
return
}
tgtStats = brs.cloneTgtStats()
}
var rinfo ResyncTargetsInfo
for tarn, st := range tgtStats {
if arn != "" && tarn != arn {
continue
}
rinfo.Targets = append(rinfo.Targets, ResyncTarget{
Arn: tarn,
ResetID: st.ResyncID,
StartTime: st.StartTime,
EndTime: st.LastUpdate,
ResyncStatus: st.ResyncStatus.String(),
ReplicatedSize: st.ReplicatedSize,
ReplicatedCount: st.ReplicatedCount,
FailedSize: st.FailedSize,
FailedCount: st.FailedCount,
Bucket: st.Bucket,
Object: st.Object,
})
}
data, err := json.Marshal(rinfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package bandwidth
import (
"context"
"sync"
"time"
"golang.org/x/time/rate"
)
type throttle struct {
*rate.Limiter
NodeBandwidthPerSec int64
}
// Monitor holds the state of the global bucket monitor
type Monitor struct {
tlock sync.RWMutex // mutex for bucketThrottle
bucketThrottle map[string]map[string]*throttle
mlock sync.RWMutex // mutex for activeBuckets map
activeBuckets map[string]map[string]*bucketMeasurement // Buckets with objects in flight
bucketMovingAvgTicker *time.Ticker // Ticker for calculating moving averages
ctx context.Context // Context for generate
NodeCount uint64
}
// NewMonitor returns a monitor with defaults.
func NewMonitor(ctx context.Context, numNodes uint64) *Monitor {
m := &Monitor{
activeBuckets: make(map[string]map[string]*bucketMeasurement),
bucketThrottle: make(map[string]map[string]*throttle),
bucketMovingAvgTicker: time.NewTicker(2 * time.Second),
ctx: ctx,
NodeCount: numNodes,
}
go m.trackEWMA()
return m
}
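// Example (illustrative sketch, not part of the original source): a monitor
// for a two-node deployment could be wired up roughly as follows; the bucket
// name and target ARN below are placeholders:
//
// m := NewMonitor(ctx, 2)
// m.SetBandwidthLimit("mybucket", "<replication-target-ARN>", 100<<20) // ~100 MiB/s cluster-wide
// if m.IsThrottled("mybucket", "<replication-target-ARN>") {
//     // callers consult the per-target rate limiter before sending replication traffic
// }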
func (m *Monitor) updateMeasurement(bucket, arn string, bytes uint64) {
m.mlock.Lock()
defer m.mlock.Unlock()
tm, ok := m.activeBuckets[bucket]
if !ok {
tm = make(map[string]*bucketMeasurement)
// register the newly created map, otherwise the assignment below
// would write into a nil map entry and panic
m.activeBuckets[bucket] = tm
}
measurement, ok := tm[arn]
if !ok {
measurement = &bucketMeasurement{}
}
measurement.incrementBytes(bytes)
tm[arn] = measurement
}
// SelectionFunction for buckets
type SelectionFunction func(bucket string) bool
// SelectBuckets will select all the buckets passed in.
func SelectBuckets(buckets ...string) SelectionFunction {
if len(buckets) == 0 {
return func(bucket string) bool {
return true
}
}
return func(bucket string) bool {
for _, b := range buckets {
if b == "" || b == bucket {
return true
}
}
return false
}
}
// Details for the measured bandwidth
type Details struct {
LimitInBytesPerSecond int64 `json:"limitInBits"`
CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}
// BucketBandwidthReport captures the details for all buckets.
type BucketBandwidthReport struct {
BucketStats map[string]map[string]Details `json:"bucketStats,omitempty"`
}
// GetReport gets the report for all bucket bandwidth details.
func (m *Monitor) GetReport(selectBucket SelectionFunction) *BucketBandwidthReport {
m.mlock.RLock()
defer m.mlock.RUnlock()
return m.getReport(selectBucket)
}
func (m *Monitor) getReport(selectBucket SelectionFunction) *BucketBandwidthReport {
report := &BucketBandwidthReport{
BucketStats: make(map[string]map[string]Details),
}
for bucket, bucketMeasurementMap := range m.activeBuckets {
if !selectBucket(bucket) {
continue
}
m.tlock.RLock()
report.BucketStats[bucket] = make(map[string]Details)
if tgtThrottle, ok := m.bucketThrottle[bucket]; ok {
for arn, throttle := range tgtThrottle {
var currBw float64
if bucketMeasurement, ok := bucketMeasurementMap[arn]; ok {
currBw = bucketMeasurement.getExpMovingAvgBytesPerSecond()
}
report.BucketStats[bucket][arn] = Details{
LimitInBytesPerSecond: throttle.NodeBandwidthPerSec * int64(m.NodeCount),
CurrentBandwidthInBytesPerSecond: currBw,
}
}
}
m.tlock.RUnlock()
}
return report
}
func (m *Monitor) trackEWMA() {
for {
select {
case <-m.bucketMovingAvgTicker.C:
m.updateMovingAvg()
case <-m.ctx.Done():
return
}
}
}
func (m *Monitor) updateMovingAvg() {
m.mlock.Lock()
defer m.mlock.Unlock()
for _, bucketMeasurement := range m.activeBuckets {
for _, measurement := range bucketMeasurement {
measurement.updateExponentialMovingAverage(time.Now())
}
}
}
func (m *Monitor) getBucketMeasurement(bucket, arn string, initTime time.Time) map[string]*bucketMeasurement {
bucketTracker, ok := m.activeBuckets[bucket]
if !ok {
bucketTracker = make(map[string]*bucketMeasurement)
m.activeBuckets[bucket] = bucketTracker
}
// ensure a measurement exists for this target ARN even when the bucket
// entry was created earlier for a different ARN
if _, ok := bucketTracker[arn]; !ok {
bucketTracker[arn] = newBucketMeasurement(initTime)
}
return bucketTracker
}
// track returns the measurement object for bucket
func (m *Monitor) track(bucket, arn string) {
m.mlock.Lock()
defer m.mlock.Unlock()
m.getBucketMeasurement(bucket, arn, time.Now())
}
// DeleteBucket deletes monitoring the 'bucket'
func (m *Monitor) DeleteBucket(bucket string) {
m.tlock.Lock()
delete(m.bucketThrottle, bucket)
m.tlock.Unlock()
m.mlock.Lock()
delete(m.activeBuckets, bucket)
m.mlock.Unlock()
}
// DeleteBucketThrottle deletes monitoring for a bucket's target
func (m *Monitor) DeleteBucketThrottle(bucket, arn string) {
m.tlock.Lock()
if _, ok := m.bucketThrottle[bucket]; ok {
delete(m.bucketThrottle[bucket], arn)
}
m.tlock.Unlock()
m.mlock.Lock()
if _, ok := m.activeBuckets[bucket]; ok {
delete(m.activeBuckets[bucket], arn)
}
m.mlock.Unlock()
}
// throttle returns currently configured throttle for this bucket
func (m *Monitor) throttle(bucket, arn string) *throttle {
m.tlock.RLock()
defer m.tlock.RUnlock()
return m.bucketThrottle[bucket][arn]
}
// SetBandwidthLimit sets the bandwidth limit for a bucket
func (m *Monitor) SetBandwidthLimit(bucket, arn string, limit int64) {
m.tlock.Lock()
defer m.tlock.Unlock()
bw := limit / int64(m.NodeCount)
tgtMap, ok := m.bucketThrottle[bucket]
if !ok {
tgtMap = make(map[string]*throttle)
tgtMap[arn] = &throttle{
NodeBandwidthPerSec: bw,
}
}
th, ok := tgtMap[arn]
if !ok {
th = &throttle{}
}
th.NodeBandwidthPerSec = bw
tgtMap[arn] = th
newlimit := rate.Every(time.Second / time.Duration(tgtMap[arn].NodeBandwidthPerSec))
tgtMap[arn].Limiter = rate.NewLimiter(newlimit, int(tgtMap[arn].NodeBandwidthPerSec))
m.bucketThrottle[bucket] = tgtMap
}
// IsThrottled returns true if a bucket has bandwidth throttling enabled.
func (m *Monitor) IsThrottled(bucket, arn string) bool {
m.tlock.RLock()
defer m.tlock.RUnlock()
th, ok := m.bucketThrottle[bucket]
if !ok {
return ok
}
_, ok = th[arn]
return ok
}
<file_sep># MinIO Server Debugging Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
## HTTP Trace
HTTP tracing can be enabled by using the [`mc admin trace`](https://github.com/minio/mc/blob/master/docs/minio-admin-complete-guide.md#command-trace---display-minio-server-http-trace) command.
Example:
```sh
minio server /data
```
By default, the trace is succinct, only indicating the API operations being called and the HTTP response status.
```sh
mc admin trace myminio
```
To trace the entire HTTP request
```sh
mc admin trace --verbose myminio
```
To trace the entire HTTP request and also internode communication
```sh
mc admin trace --all --verbose myminio
```
## Subnet Health
Subnet Health diagnostics help ensure that the underlying infrastructure that runs MinIO is configured correctly and is functioning properly. This is a one-shot, long-running test that is recommended to be run as soon as the cluster is first provisioned, and again each time a failure scenario is encountered. Note that the test consumes the majority of the available resources on the system, so care must be taken when using it to debug a failure scenario in order to prevent larger outages. Health tests can be triggered using the `mc support diagnostics` command.
Example:
```sh
minio server /data{1...4}
```
The command takes no flags.
```sh
mc support diagnostics myminio/
```
The output printed will be of the form
```sh
โ Admin Info ... โ
โ CPU ... โ
โ Disk Hardware ... โ
โ Os Info ... โ
โ Mem Info ... โ
โ Process Info ... โ
โ Config ... โ
โ Drive ... โ
โ Net ... โ
*********************************************************************************
WARNING!!
** THIS FILE MAY CONTAIN SENSITIVE INFORMATION ABOUT YOUR ENVIRONMENT **
** PLEASE INSPECT CONTENTS BEFORE SHARING IT ON ANY PUBLIC FORUM **
*********************************************************************************
mc: Health data saved to dc-11-health_20200321053323.json.gz
```
The gzipped output contains debugging information for your system.
## Decoding Metadata
Metadata is stored in `xl.meta` files for erasure-coded objects. Each disk in the set containing the object has this file. The format is binary, so tools are required to view its values.
### Installing xl-meta
To install, [Go](https://golang.org/dl/) must be installed. Once installed, execute this to install the binary:
```bash
go install github.com/minio/minio/docs/debugging/xl-meta@latest
```
### Using xl-meta
Executing `xl-meta` will look for an `xl.meta` file in the current folder and decode it to JSON. It is also possible to specify multiple files or wildcards; for example, `xl-meta ./**/xl.meta` will output decoded metadata recursively. Inline data stored in the metadata can be viewed with the `--data` parameter; for example, `xl-meta --data xl.json` will display an id -> data size mapping. To export inline data to a file, use the `--export` option.
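Putting the above together, typical invocations might look like the following sketch (paths are illustrative):

```bash
# decode the xl.meta in the current folder to JSON
xl-meta

# decode all xl.meta files below the current folder
xl-meta ./**/xl.meta

# show id -> inline data size for objects carrying inline data
xl-meta --data xl.meta

# write inline data out to files
xl-meta --export xl.meta
```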
### Remotely Inspecting backend data
`mc support inspect` allows collecting files based on *path* from all backend drives. Matching files will be collected in a zip file with their respective host+drive+path. A MinIO host from October 2021 or later is required for full functionality. The syntax is `mc support inspect ALIAS/path/to/files`. This can, for example, be used to collect `xl.meta` from objects that are misbehaving. To collect `xl.meta` from a specific object, for example one placed at `ALIAS/bucket/path/to/file.txt`, append `/xl.meta`, for instance `mc support inspect ALIAS/bucket/path/to/file.txt/xl.meta`. All files can be collected, so this can also be used to retrieve `part.*` files, etc.
Wildcards can be used, for example `mc support inspect ALIAS/bucket/path/**/xl.meta` will collect all `xl.meta` recursively. `mc support inspect ALIAS/bucket/path/to/file.txt/*/part.*` will collect parts for all versions for the object located at `bucket/path/to/file.txt`.
`xl-meta` accepts zip files as input and will output all `xl.meta` files found within the archive. For example:
```
$ mc support inspect play/test123/test*/xl.meta
mc: File data successfully downloaded as inspect.6f96b336.zip
$ xl-meta inspect.6f96b336.zip
{
"bf6178f9-4014-4008-9699-86f2fac62226/test123/testw3c.pdf/xl.meta": {"Versions":[{"Type":1,"V2Obj":{"ID":"aGEA/ZUOR4ueRIZsAgfDqA==","DDir":"9MMwM47bS+K6KvQqN3hlDw==","EcAlgo":1,"EcM":2,"EcN":2,"EcBSize":1048576,"EcIndex":4,"EcDist":[4,1,2,3],"CSumAlgo":1,"PartNums":[1],"PartETags":[""],"PartSizes":[101974],"PartASizes":[176837],"Size":101974,"MTime":1634106631319256439,"MetaSys":{"X-Minio-Internal-compression":"a2xhdXNwb3N0L2NvbXByZXNzL3My","X-Minio-Internal-actual-size":"MTc2ODM3","x-minio-internal-objectlock-legalhold-timestamp":"MjAyMS0xMC0xOVQyMjozNTo0Ni4zNTE4MDU3NTda"},"MetaUsr":{"x-amz-object-lock-mode":"COMPLIANCE","x-amz-object-lock-retain-until-date":"2022-10-13T06:30:31.319Z","etag":"67ed8f49b7137cb957858ce468f2e79e","content-type":"application/pdf","x-amz-object-lock-legal-hold":"OFF"}}}]},
"fe012443-6ba9-4ef2-bb94-b729d2060c78/test123/testw3c.pdf/xl.meta": {"Versions":[{"Type":1,"V2Obj":{"ID":"aGEA/ZUOR4ueRIZsAgfDqA==","DDir":"9MMwM47bS+K6KvQqN3hlDw==","EcAlgo":1,"EcM":2,"EcN":2,"EcBSize":1048576,"EcIndex":1,"EcDist":[4,1,2,3],"CSumAlgo":1,"PartNums":[1],"PartETags":[""],"PartSizes":[101974],"PartASizes":[176837],"Size":101974,"MTime":1634106631319256439,"MetaSys":{"X-Minio-Internal-compression":"a2xhdXNwb3N0L2NvbXByZXNzL3My","X-Minio-Internal-actual-size":"MTc2ODM3","x-minio-internal-objectlock-legalhold-timestamp":"MjAyMS0xMC0xOVQyMjozNTo0Ni4zNTE4MDU3NTda"},"MetaUsr":{"content-type":"application/pdf","x-amz-object-lock-legal-hold":"OFF","x-amz-object-lock-mode":"COMPLIANCE","x-amz-object-lock-retain-until-date":"2022-10-13T06:30:31.319Z","etag":"67ed8f49b7137cb957858ce468f2e79e"}}}]},
"5dcb9f38-08ea-4728-bb64-5cecc7102436/test123/testw3c.pdf/xl.meta": {"Versions":[{"Type":1,"V2Obj":{"ID":"aGEA/ZUOR4ueRIZsAgfDqA==","DDir":"9MMwM47bS+K6KvQqN3hlDw==","EcAlgo":1,"EcM":2,"EcN":2,"EcBSize":1048576,"EcIndex":2,"EcDist":[4,1,2,3],"CSumAlgo":1,"PartNums":[1],"PartETags":[""],"PartSizes":[101974],"PartASizes":[176837],"Size":101974,"MTime":1634106631319256439,"MetaSys":{"X-Minio-Internal-compression":"a2xhdXNwb3N0L2NvbXByZXNzL3My","X-Minio-Internal-actual-size":"MTc2ODM3","x-minio-internal-objectlock-legalhold-timestamp":"MjAyMS0xMC0xOVQyMjozNTo0Ni4zNTE4MDU3NTda"},"MetaUsr":{"content-type":"application/pdf","x-amz-object-lock-legal-hold":"OFF","x-amz-object-lock-mode":"COMPLIANCE","x-amz-object-lock-retain-until-date":"2022-10-13T06:30:31.319Z","etag":"67ed8f49b7137cb957858ce468f2e79e"}}}]},
"48beacc7-4be0-4660-9026-4eceaf147504/test123/testw3c.pdf/xl.meta": {"Versions":[{"Type":1,"V2Obj":{"ID":"aGEA/ZUOR4ueRIZsAgfDqA==","DDir":"9MMwM47bS+K6KvQqN3hlDw==","EcAlgo":1,"EcM":2,"EcN":2,"EcBSize":1048576,"EcIndex":3,"EcDist":[4,1,2,3],"CSumAlgo":1,"PartNums":[1],"PartETags":[""],"PartSizes":[101974],"PartASizes":[176837],"Size":101974,"MTime":1634106631319256439,"MetaSys":{"X-Minio-Internal-compression":"a2xhdXNwb3N0L2NvbXByZXNzL3My","X-Minio-Internal-actual-size":"MTc2ODM3","x-minio-internal-objectlock-legalhold-timestamp":"MjAyMS0xMC0xOVQyMjozNTo0Ni4zNTE4MDU3NTda"},"MetaUsr":{"x-amz-object-lock-retain-until-date":"2022-10-13T06:30:31.319Z","x-amz-object-lock-legal-hold":"OFF","etag":"67ed8f49b7137cb957858ce468f2e79e","content-type":"application/pdf","x-amz-object-lock-mode":"COMPLIANCE"}}}]}
}
```
Optionally `--encrypt` can be specified. This will output an encrypted file and a decryption key:
```
$ mc support inspect --encrypt play/test123/test*/*/part.*
mc: Encrypted file data successfully downloaded as inspect.ad2b43d8.enc
mc: Decryption key: <KEY>
mc: The decryption key will ONLY be shown here. It cannot be recovered.
mc: The encrypted file can safely be shared without the decryption key.
mc: Even with the decryption key, data stored with encryption cannot be accessed.
```
This file can be decrypted using the decryption tool below:
### Installing decryption tool
To install, [Go](https://golang.org/dl/) must be installed.
Once installed, execute this to install the binary:
```bash
go install github.com/minio/minio/docs/debugging/inspect@latest
```
### Usage
To decrypt the file above:
```
$ inspect -key=<KEY> inspect.ad2b43d8.enc
Output decrypted to inspect.ad2b43d8.zip
```
If `--key` is not specified, an interactive prompt will ask for it. The file name contains the beginning of the key, which can be used to verify that the key matches the encrypted file.
<file_sep>#!/bin/bash -e
set -E
set -o pipefail
set -x
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
function start_minio_4drive() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>
export MC_HOST_minio="http://minio:[email protected]:${start_port}/"
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
export MINIO_CI_CD=1
mkdir ${WORK_DIR}
C_PWD=${PWD}
if [ ! -x "$PWD/mc" ]; then
MC_BUILD_DIR="mc-$RANDOM"
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
echo "failed to download https://github.com/minio/mc"
purge "${MC_BUILD_DIR}"
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"
fi
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...4}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 5
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
"${PWD}/mc" mb --with-versioning minio/bucket
for i in $(seq 1 4); do
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
sudo chown -R root. "${WORK_DIR}/disk${i}"
"${PWD}/mc" cp /etc/hosts minio/bucket/testobj
sudo chown -R ${USER}. "${WORK_DIR}/disk${i}"
done
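# Read back every version of the object and print its md5sum to verify the data is still readable.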
for vid in $("${PWD}/mc" ls --json --versions minio/bucket/testobj | jq -r .versionId); do
"${PWD}/mc" cat --vid "${vid}" minio/bucket/testobj | md5sum
done
pkill minio
sleep 3
}
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio_4drive ${start_port}
}
function purge() {
rm -rf "$1"
}
(main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
<file_sep># MinIO Batch Job
MinIO Batch Jobs is a MinIO object management feature that lets you manage objects at scale. Jobs currently supported by MinIO:
- Replicate objects between buckets on multiple sites
Upcoming jobs:
- Copy objects from NAS to MinIO
- Copy objects from HDFS to MinIO
## Replication Job
To perform replication via batch jobs, you create a job. The job consists of a job description YAML that describes:
- The source location from which the objects must be copied
- The target location to which the objects must be copied
- Fine-grained filtering to pick only the relevant source objects to copy
The MinIO batch jobs framework also provides:
- Automatic retries of a failed job, driven by user input
- Monitoring of job progress in real-time
- Notifications upon completion or failure to a user-configured target
The following YAML describes the structure of a replication job; each value is documented and self-describing.
```yaml
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: TYPE # valid values are "minio"
bucket: BUCKET
prefix: PREFIX
# NOTE: if source is remote then target must be "local"
# endpoint: ENDPOINT
# credentials:
# accessKey: ACCESS-KEY
# secretKey: SECRET-KEY
# sessionToken: SESSION-TOKEN # Available when rotating credentials are used
# target where the objects must be replicated
target:
type: TYPE # valid values are "minio"
bucket: BUCKET
prefix: PREFIX
# NOTE: if target is remote then source must be "local"
# endpoint: ENDPOINT
# credentials:
# accessKey: ACCESS-KEY
# secretKey: SECRET-KEY
# sessionToken: SESSION-TOKEN # Available when rotating credentials are used
# optional flags based filtering criteria
# for all source objects
flags:
filter:
newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
createdAfter: "date" # match objects created after "date"
createdBefore: "date" # match objects created before "date"
## NOTE: tags are not supported when "source" is remote.
# tags:
# - key: "name"
# value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
## NOTE: metadata filter not supported when "source" is non MinIO.
# metadata:
# - key: "content-type"
# value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
notify:
endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
token: "<PASSWORD>" # optional authentication token for the notification endpoint
retry:
attempts: 10 # number of retries for the job before giving up
delay: "500ms" # least amount of delay between each retry
```
You can create and run multiple 'replication' jobs at a time; there are no predefined limits.
## Batch Jobs Terminology
### Job
A job is the basic unit of work for MinIO Batch Jobs. A job is a self-describing YAML; once this YAML is submitted and evaluated, MinIO performs the requested actions on each of the objects matching the criteria described in the job YAML file.
### Type
Type describes the job type, such as replicating objects between MinIO sites. Each job performs a single type of operation across all objects that match the job description criteria.
## Batch Jobs via Commandline
[mc](http://github.com/minio/mc) provides the 'mc batch' command to create, start, and manage submitted jobs.
```
NAME:
mc batch - manage batch jobs
USAGE:
mc batch COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]
COMMANDS:
generate generate a new batch job definition
start start a new batch job
list, ls list all current batch jobs
status summarize job events on MinIO server in real-time
describe describe job definition for a job
```
### Generate a job yaml
```
mc batch generate alias/ replicate
```
### Start the batch job (returns the JID)
```
mc batch start alias/ ./replicate.yaml
Successfully start 'replicate' job `E24HH4nNMcgY5taynaPfxu` on '2022-09-26 17:19:06.296974771 -0700 PDT'
```
### List all batch jobs
```
mc batch list alias/
ID TYPE USER STARTED
E24HH4nNMcgY5taynaPfxu replicate minioadmin 1 minute ago
```
### List all 'replicate' batch jobs
```
mc batch list alias/ --type replicate
ID TYPE USER STARTED
E24HH4nNMcgY5taynaPfxu replicate minioadmin 1 minute ago
```
### Real-time 'status' for a batch job
```
mc batch status myminio/ E24HH4nNMcgY5taynaPfxu
โโโ
Objects: 28766
Versions: 28766
Throughput: 3.0 MiB/s
Transferred: 406 MiB
Elapsed: 2m14.227222868s
CurrObjName: share/doc/xml-core/examples/foo.xmlcatalogs
```
### 'describe' the batch job yaml.
```
mc batch describe myminio/ E24HH4nNMcgY5taynaPfxu
replicate:
apiVersion: v1
...
```
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package plugin
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"sync"
"time"
"github.com/minio/minio/internal/arn"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
xnet "github.com/minio/pkg/net"
)
// Authentication Plugin config and env variables
const (
URL = "url"
AuthToken = "auth_token"
RolePolicy = "role_policy"
RoleID = "role_id"
EnvIdentityPluginURL = "MINIO_IDENTITY_PLUGIN_URL"
EnvIdentityPluginAuthToken = "MINIO_IDENTITY_PLUGIN_AUTH_TOKEN"
EnvIdentityPluginRolePolicy = "MINIO_IDENTITY_PLUGIN_ROLE_POLICY"
EnvIdentityPluginRoleID = "MINIO_IDENTITY_PLUGIN_ROLE_ID"
)
var (
// DefaultKVS - default config for AuthN plugin config
DefaultKVS = config.KVS{
config.KV{
Key: URL,
Value: "",
},
config.KV{
Key: AuthToken,
Value: "",
},
config.KV{
Key: RolePolicy,
Value: "",
},
config.KV{
Key: RoleID,
Value: "",
},
}
defaultHelpPostfix = func(key string) string {
return config.DefaultHelpPostfix(DefaultKVS, key)
}
// Help for Identity Plugin
Help = config.HelpKVS{
config.HelpKV{
Key: URL,
Description: `plugin hook endpoint (HTTP(S)) e.g. "http://localhost:8181/path/to/endpoint"` + defaultHelpPostfix(URL),
Type: "url",
},
config.HelpKV{
Key: AuthToken,
Description: "authorization token for plugin hook endpoint" + defaultHelpPostfix(AuthToken),
Optional: true,
Type: "string",
Sensitive: true,
Secret: true,
},
config.HelpKV{
Key: RolePolicy,
Description: "policies to apply for plugin authorized users" + defaultHelpPostfix(RolePolicy),
Type: "string",
},
config.HelpKV{
Key: RoleID,
Description: "unique ID to generate the ARN" + defaultHelpPostfix(RoleID),
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
// Allows only Base64 URL encoding characters.
var validRoleIDRegex = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
// Args for authentication plugin.
type Args struct {
URL *xnet.URL
AuthToken string
Transport http.RoundTripper
CloseRespFn func(r io.ReadCloser)
RolePolicy string
RoleARN arn.ARN
}
// Validate - validate configuration params.
func (a *Args) Validate() error {
req, err := http.NewRequest(http.MethodPost, a.URL.String(), bytes.NewReader([]byte("")))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
if a.AuthToken != "" {
req.Header.Set("Authorization", a.AuthToken)
}
client := &http.Client{Transport: a.Transport}
resp, err := client.Do(req)
if err != nil {
return err
}
defer a.CloseRespFn(resp.Body)
return nil
}
type serviceRTTMinuteStats struct {
statsTime time.Time
rttMsSum, maxRttMs float64
successRequestCount int64
failedRequestCount int64
}
type metrics struct {
sync.Mutex
LastCheckSuccess time.Time
LastCheckFailure time.Time
lastFullMinute serviceRTTMinuteStats
currentMinute serviceRTTMinuteStats
}
func (h *metrics) setConnSuccess(reqStartTime time.Time) {
h.Lock()
defer h.Unlock()
h.LastCheckSuccess = reqStartTime
}
func (h *metrics) setConnFailure(reqStartTime time.Time) {
h.Lock()
defer h.Unlock()
h.LastCheckFailure = reqStartTime
}
func (h *metrics) updateLastFullMinute(currReqMinute time.Time) {
// Assumes the caller has h.Lock()'ed
h.lastFullMinute = h.currentMinute
h.currentMinute = serviceRTTMinuteStats{
statsTime: currReqMinute,
}
}
func (h *metrics) accumRequestRTT(reqStartTime time.Time, rttMs float64, isSuccess bool) {
h.Lock()
defer h.Unlock()
// Update connectivity times
if isSuccess {
if reqStartTime.After(h.LastCheckSuccess) {
h.LastCheckSuccess = reqStartTime
}
} else {
if reqStartTime.After(h.LastCheckFailure) {
h.LastCheckFailure = reqStartTime
}
}
// Round the request time *down* to the whole minute.
reqTimeMinute := reqStartTime.Truncate(time.Minute)
if reqTimeMinute.After(h.currentMinute.statsTime) {
// Drop the last full minute now, since we got a request for a time we
// are not yet tracking.
h.updateLastFullMinute(reqTimeMinute)
}
var entry *serviceRTTMinuteStats
switch {
case reqTimeMinute.Equal(h.currentMinute.statsTime):
entry = &h.currentMinute
case reqTimeMinute.Equal(h.lastFullMinute.statsTime):
entry = &h.lastFullMinute
default:
// This request is too old, it should never happen, ignore it as we
// cannot return an error.
return
}
// Update stats
if isSuccess {
if entry.maxRttMs < rttMs {
entry.maxRttMs = rttMs
}
entry.rttMsSum += rttMs
entry.successRequestCount++
} else {
entry.failedRequestCount++
}
}
// AuthNPlugin - implements pluggable authentication via webhook.
type AuthNPlugin struct {
args Args
client *http.Client
shutdownCtx context.Context
serviceMetrics *metrics
}
// Enabled returns if AuthNPlugin is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(URL) != ""
}
// LookupConfig lookup AuthNPlugin from config, override with any ENVs.
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser), serverRegion string) (Args, error) {
args := Args{}
if err := config.CheckValidKeys(config.IdentityPluginSubSys, kv, DefaultKVS); err != nil {
return args, err
}
pluginURL := env.Get(EnvIdentityPluginURL, kv.Get(URL))
if pluginURL == "" {
return args, nil
}
authToken := env.Get(EnvIdentityPluginAuthToken, kv.Get(AuthToken))
u, err := xnet.ParseHTTPURL(pluginURL)
if err != nil {
return args, err
}
rolePolicy := env.Get(EnvIdentityPluginRolePolicy, kv.Get(RolePolicy))
if rolePolicy == "" {
return args, config.Errorf("A role policy must be specified for Identity Management Plugin")
}
resourceID := "idmp-"
roleID := env.Get(EnvIdentityPluginRoleID, kv.Get(RoleID))
if roleID == "" {
// We use a hash of the plugin URL so that the ARN remains
// constant across restarts.
h := sha1.New()
h.Write([]byte(pluginURL))
bs := h.Sum(nil)
resourceID += base64.RawURLEncoding.EncodeToString(bs)
} else {
// Check that the roleID is restricted to URL safe characters
// (base64 URL encoding chars).
if !validRoleIDRegex.MatchString(roleID) {
return args, config.Errorf("Role ID must match the regexp `^[a-zA-Z0-9_-]+$`")
}
// Use the user provided ID here.
resourceID += roleID
}
roleArn, err := arn.NewIAMRoleARN(resourceID, serverRegion)
if err != nil {
return args, config.Errorf("unable to generate ARN from the plugin config: %v", err)
}
args = Args{
URL: u,
AuthToken: authToken,
Transport: transport,
CloseRespFn: closeRespFn,
RolePolicy: rolePolicy,
RoleARN: roleArn,
}
if err = args.Validate(); err != nil {
return args, err
}
return args, nil
}
// New - initializes the Authentication Plugin.
func New(shutdownCtx context.Context, args Args) *AuthNPlugin {
if args.URL == nil || args.URL.Scheme == "" && args.AuthToken == "" {
return nil
}
plugin := AuthNPlugin{
args: args,
client: &http.Client{Transport: args.Transport},
shutdownCtx: shutdownCtx,
serviceMetrics: &metrics{
Mutex: sync.Mutex{},
LastCheckSuccess: time.Unix(0, 0),
LastCheckFailure: time.Unix(0, 0),
lastFullMinute: serviceRTTMinuteStats{},
currentMinute: serviceRTTMinuteStats{},
},
}
go plugin.doPeriodicHealthCheck()
return &plugin
}
// AuthNSuccessResponse - represents the response from the authentication plugin
// service.
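//
// For illustration, a successful response body might look like:
//
//	{"user": "alice", "maxValiditySeconds": 3600, "claims": {"preferred_username": "alice"}}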
type AuthNSuccessResponse struct {
User string `json:"user"`
MaxValiditySeconds int `json:"maxValiditySeconds"`
Claims map[string]interface{} `json:"claims"`
}
// AuthNErrorResponse - represents an error response from the authN plugin.
type AuthNErrorResponse struct {
Reason string `json:"reason"`
}
// AuthNResponse - represents a result of the authentication operation.
type AuthNResponse struct {
Success *AuthNSuccessResponse
Failure *AuthNErrorResponse
}
const (
minValidityDurationSeconds int = 900
maxValidityDurationSeconds int = 365 * 24 * 3600
)
// Authenticate authenticates the token with the external hook endpoint and
// returns a parent user, max expiry duration for the authentication and a set
// of claims.
func (o *AuthNPlugin) Authenticate(roleArn arn.ARN, token string) (AuthNResponse, error) {
if o == nil {
return AuthNResponse{}, nil
}
if roleArn != o.args.RoleARN {
return AuthNResponse{}, fmt.Errorf("Invalid role ARN value: %s", roleArn.String())
}
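// The opaque token is passed to the plugin endpoint as a `token` query parameter on a POST request.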
u := url.URL(*o.args.URL)
q := u.Query()
q.Set("token", token)
u.RawQuery = q.Encode()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), nil)
if err != nil {
return AuthNResponse{}, err
}
if o.args.AuthToken != "" {
req.Header.Set("Authorization", o.args.AuthToken)
}
reqStartTime := time.Now()
resp, err := o.client.Do(req)
if err != nil {
o.serviceMetrics.accumRequestRTT(reqStartTime, 0, false)
return AuthNResponse{}, err
}
defer o.args.CloseRespFn(resp.Body)
reqDurNanos := time.Since(reqStartTime).Nanoseconds()
o.serviceMetrics.accumRequestRTT(reqStartTime, float64(reqDurNanos)/1e6, true)
switch resp.StatusCode {
case 200:
var result AuthNSuccessResponse
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
return AuthNResponse{}, err
}
if result.MaxValiditySeconds < minValidityDurationSeconds || result.MaxValiditySeconds > maxValidityDurationSeconds {
return AuthNResponse{}, fmt.Errorf("Plugin returned an invalid validity duration (%d) - should be between %d and %d",
result.MaxValiditySeconds, minValidityDurationSeconds, maxValidityDurationSeconds)
}
return AuthNResponse{
Success: &result,
}, nil
case 403:
var result AuthNErrorResponse
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
return AuthNResponse{}, err
}
return AuthNResponse{
Failure: &result,
}, nil
default:
return AuthNResponse{}, fmt.Errorf("Invalid status code %d from auth plugin", resp.StatusCode)
}
}
// GetRoleInfo - returns ARN to policies map.
func (o *AuthNPlugin) GetRoleInfo() map[arn.ARN]string {
return map[arn.ARN]string{
o.args.RoleARN: o.args.RolePolicy,
}
}
// checkConnectivity returns true if we are able to connect to the plugin
// service.
func (o *AuthNPlugin) checkConnectivity(ctx context.Context) bool {
ctx, cancel := context.WithTimeout(ctx, healthCheckTimeout)
defer cancel()
u := url.URL(*o.args.URL)
req, err := http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil)
if err != nil {
logger.LogIf(ctx, err)
return false
}
if o.args.AuthToken != "" {
req.Header.Set("Authorization", o.args.AuthToken)
}
resp, err := o.client.Do(req)
if err != nil {
return false
}
defer o.args.CloseRespFn(resp.Body)
return true
}
var (
healthCheckInterval = 1 * time.Minute
healthCheckTimeout = 5 * time.Second
)
func (o *AuthNPlugin) doPeriodicHealthCheck() {
ticker := time.NewTicker(healthCheckInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
now := time.Now()
isConnected := o.checkConnectivity(o.shutdownCtx)
if isConnected {
o.serviceMetrics.setConnSuccess(now)
} else {
o.serviceMetrics.setConnFailure(now)
}
case <-o.shutdownCtx.Done():
return
}
}
}
// Metrics contains metrics about the authentication plugin service.
type Metrics struct {
LastReachableSecs, LastUnreachableSecs float64
// Last whole minute stats
TotalRequests, FailedRequests int64
AvgSuccRTTMs float64
MaxSuccRTTMs float64
}
// Metrics reports metrics related to plugin service reachability and stats for the last whole minute
func (o *AuthNPlugin) Metrics() Metrics {
if o == nil {
// Return empty metrics when not configured.
return Metrics{}
}
o.serviceMetrics.Lock()
defer o.serviceMetrics.Unlock()
l := &o.serviceMetrics.lastFullMinute
var avg float64
if l.successRequestCount > 0 {
avg = l.rttMsSum / float64(l.successRequestCount)
}
now := time.Now().UTC()
return Metrics{
LastReachableSecs: now.Sub(o.serviceMetrics.LastCheckSuccess).Seconds(),
LastUnreachableSecs: now.Sub(o.serviceMetrics.LastCheckFailure).Seconds(),
TotalRequests: l.failedRequestCount + l.successRequestCount,
FailedRequests: l.failedRequestCount,
AvgSuccRTTMs: avg,
MaxSuccRTTMs: l.maxRttMs,
}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package callhome
import (
"sync"
"time"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/env"
)
// Callhome related keys
const (
Enable = "enable"
Frequency = "frequency"
)
// DefaultKVS - default KV config for subnet settings
var DefaultKVS = config.KVS{
config.KV{
Key: Enable,
Value: "off",
},
config.KV{
Key: Frequency,
Value: "24h",
},
}
// callhomeCycleDefault is the default interval between two callhome cycles (24hrs)
const callhomeCycleDefault = 24 * time.Hour
// Config represents the subnet related configuration
type Config struct {
// Flag indicating whether callhome is enabled.
Enable bool `json:"enable"`
// The interval between callhome cycles
Frequency time.Duration `json:"frequency"`
}
var configLock sync.RWMutex
// Enabled - indicates if callhome is enabled or not
func (c *Config) Enabled() bool {
configLock.RLock()
defer configLock.RUnlock()
return c.Enable
}
// FrequencyDur - returns the currently configured callhome frequency
func (c *Config) FrequencyDur() time.Duration {
configLock.RLock()
defer configLock.RUnlock()
if c.Frequency == 0 {
return callhomeCycleDefault
}
return c.Frequency
}
// Update updates new callhome frequency
func (c *Config) Update(ncfg Config) {
configLock.Lock()
defer configLock.Unlock()
c.Enable = ncfg.Enable
c.Frequency = ncfg.Frequency
}
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.CallhomeSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
cfg.Enable = env.Get(config.EnvMinIOCallhomeEnable,
kvs.GetWithDefault(Enable, DefaultKVS)) == config.EnableOn
cfg.Frequency, err = time.ParseDuration(env.Get(config.EnvMinIOCallhomeFrequency,
kvs.GetWithDefault(Frequency, DefaultKVS)))
return cfg, err
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"net"
"time"
"github.com/rs/dnscache"
)
// DialContextWithDNSCache is a helper function which returns a `DialContext` function.
// It looks up the host via the DNS cache and dials the returned IPs one by one using the
// given dial function, returning the first successfully connected `net.Conn`. If dialing
// all IPs from the cache fails, the last dial error is returned. If no baseDialCtx is
// given, a default dial function is used.
//
// You can use the returned dial function for `http.Transport.DialContext`.
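//
// Example usage (illustrative sketch):
//
//	resolver := &dnscache.Resolver{}
//	transport := &http.Transport{
//		DialContext: DialContextWithDNSCache(resolver, nil),
//	}
//	client := &http.Client{Transport: transport}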
func DialContextWithDNSCache(resolver *dnscache.Resolver, baseDialCtx DialContext) DialContext {
if baseDialCtx == nil {
// This is same as which `http.DefaultTransport` uses.
baseDialCtx = (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext
}
return func(ctx context.Context, network, addr string) (conn net.Conn, err error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
if net.ParseIP(host) != nil {
// For IP only setups there is no need for DNS lookups.
return baseDialCtx(ctx, "tcp", addr)
}
ips, err := resolver.LookupHost(ctx, host)
if err != nil {
return nil, err
}
for _, ip := range ips {
conn, err = baseDialCtx(ctx, "tcp", net.JoinHostPort(ip, port))
if err == nil {
break
}
}
return
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"io"
"time"
"github.com/minio/madmin-go/v3"
)
// StorageAPI interface.
type StorageAPI interface {
// Stringified version of disk.
String() string
// Storage operations.
// Returns true if the disk is online and valid, i.e. it has a valid format.json.
// This has nothing to do with if the drive is hung or not responding.
// For that individual storage API calls will fail properly. The purpose
// of this function is to know if the "drive" has "format.json" or not
// if it has a "format.json" then is it correct "format.json" or not.
IsOnline() bool
// Returns the last time this disk (re)-connected
LastConn() time.Time
// Indicates if disk is local or not.
IsLocal() bool
// Returns hostname if disk is remote.
Hostname() string
// Returns the entire endpoint.
Endpoint() Endpoint
// Close the disk, mark it purposefully closed, only implemented for remote disks.
Close() error
// Returns the unique 'uuid' of this disk.
GetDiskID() (string, error)
// Set a unique 'uuid' for this disk, only used when
// disk is replaced and formatted.
SetDiskID(id string)
// Returns healing information for a newly replaced disk,
// returns 'nil' once healing is complete or if the disk
// has never been replaced.
Healing() *healingTracker
DiskInfo(ctx context.Context) (info DiskInfo, err error)
NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode) (dataUsageCache, error)
// Volume operations.
MakeVol(ctx context.Context, volume string) (err error)
MakeVolBulk(ctx context.Context, volumes ...string) (err error)
ListVols(ctx context.Context) (vols []VolInfo, err error)
StatVol(ctx context.Context, volume string) (vol VolInfo, err error)
DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error)
// WalkDir will walk a directory on disk and return a metacache stream on wr.
WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error
// Metadata operations
DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) error
DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) []error
WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) error
UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo) error
ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (FileInfo, error)
ReadXL(ctx context.Context, volume, path string, readData bool) (RawFileInfo, error)
RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (uint64, error)
// File operations.
ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error)
ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error)
AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error)
CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error
ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error)
RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error
CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error
Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error)
VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error
StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error)
ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error
CleanAbandonedData(ctx context.Context, volume string, path string) error
// Write all data, syncs the data to disk.
// Should be used for smaller payloads.
WriteAll(ctx context.Context, volume string, path string, b []byte) (err error)
// Read all.
ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error)
GetDiskLoc() (poolIdx, setIdx, diskIdx int) // Retrieve location indexes.
SetDiskLoc(poolIdx, setIdx, diskIdx int) // Set location indexes.
}
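// unrecognizedDisk wraps a StorageAPI and fails all storage operations with errDiskNotFound.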
type unrecognizedDisk struct {
storage StorageAPI
}
func (p *unrecognizedDisk) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) String() string {
return p.storage.String()
}
func (p *unrecognizedDisk) IsOnline() bool {
return false
}
func (p *unrecognizedDisk) LastConn() time.Time {
return p.storage.LastConn()
}
func (p *unrecognizedDisk) IsLocal() bool {
return p.storage.IsLocal()
}
func (p *unrecognizedDisk) Endpoint() Endpoint {
return p.storage.Endpoint()
}
func (p *unrecognizedDisk) Hostname() string {
return p.storage.Hostname()
}
func (p *unrecognizedDisk) Healing() *healingTracker {
return nil
}
func (p *unrecognizedDisk) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode) (dataUsageCache, error) {
return dataUsageCache{}, errDiskNotFound
}
func (p *unrecognizedDisk) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
return -1, -1, -1
}
func (p *unrecognizedDisk) SetDiskLoc(poolIdx, setIdx, diskIdx int) {
}
func (p *unrecognizedDisk) Close() error {
return p.storage.Close()
}
func (p *unrecognizedDisk) GetDiskID() (string, error) {
return "", errDiskNotFound
}
func (p *unrecognizedDisk) SetDiskID(id string) {
}
func (p *unrecognizedDisk) DiskInfo(ctx context.Context) (info DiskInfo, err error) {
return info, errDiskNotFound
}
func (p *unrecognizedDisk) MakeVolBulk(ctx context.Context, volumes ...string) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) MakeVol(ctx context.Context, volume string) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) ListVols(ctx context.Context) ([]VolInfo, error) {
return nil, errDiskNotFound
}
func (p *unrecognizedDisk) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
return vol, errDiskNotFound
}
func (p *unrecognizedDisk) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error) {
return nil, errDiskNotFound
}
func (p *unrecognizedDisk) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
return 0, errDiskNotFound
}
func (p *unrecognizedDisk) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
return errDiskNotFound
}
func (p *unrecognizedDisk) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
return nil, errDiskNotFound
}
func (p *unrecognizedDisk) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error {
return errDiskNotFound
}
func (p *unrecognizedDisk) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (uint64, error) {
return 0, errDiskNotFound
}
func (p *unrecognizedDisk) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
return errDiskNotFound
}
// DeleteVersions deletes slice of versions, it can be same object
// or multiple objects.
func (p *unrecognizedDisk) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) (errs []error) {
errs = make([]error, len(versions))
for i := range errs {
errs[i] = errDiskNotFound
}
return errs
}
func (p *unrecognizedDisk) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error {
return errDiskNotFound
}
func (p *unrecognizedDisk) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
return errDiskNotFound
}
func (p *unrecognizedDisk) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
return fi, errDiskNotFound
}
func (p *unrecognizedDisk) ReadXL(ctx context.Context, volume, path string, readData bool) (rf RawFileInfo, err error) {
return rf, errDiskNotFound
}
func (p *unrecognizedDisk) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
return nil, errDiskNotFound
}
func (p *unrecognizedDisk) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) {
return nil, errDiskNotFound
}
func (p *unrecognizedDisk) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error {
close(resp)
return errDiskNotFound
}
func (p *unrecognizedDisk) CleanAbandonedData(ctx context.Context, volume string, path string) error {
return errDiskNotFound
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"crypto/md5"
"flag"
"fmt"
"io"
"log"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
var (
endpoint, accessKey, secretKey string
minModTimeStr string
bucket, prefix string
debug bool
versions bool
insecure bool
)
// getMD5Sum returns MD5 sum of given data.
func getMD5Sum(data []byte) []byte {
hash := md5.New()
hash.Write(data)
return hash.Sum(nil)
}
func main() {
flag.StringVar(&endpoint, "endpoint", "https://play.min.io", "S3 endpoint URL")
flag.StringVar(&accessKey, "access-key", "<KEY>", "S3 Access Key")
flag.StringVar(&secretKey, "secret-key", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", "S3 Secret Key")
flag.StringVar(&bucket, "bucket", "", "Select a specific bucket")
flag.StringVar(&prefix, "prefix", "", "Select a prefix")
flag.BoolVar(&debug, "debug", false, "Prints HTTP network calls to S3 endpoint")
flag.BoolVar(&versions, "versions", false, "Verify all versions")
flag.BoolVar(&insecure, "insecure", false, "Disable TLS verification")
flag.StringVar(&minModTimeStr, "modified-since", "", "Specify a minimum object last modified time, e.g.: 2023-01-02T15:04:05Z")
flag.Parse()
if endpoint == "" {
log.Fatalln("Endpoint is not provided")
}
if accessKey == "" {
log.Fatalln("Access key is not provided")
}
if secretKey == "" {
log.Fatalln("Secret key is not provided")
}
if bucket == "" && prefix != "" {
log.Fatalln("--prefix is specified without --bucket.")
}
var minModTime time.Time
if minModTimeStr != "" {
var e error
minModTime, e = time.Parse(time.RFC3339, minModTimeStr)
if e != nil {
log.Fatalln("Unable to parse --modified-since:", e)
}
}
u, err := url.Parse(endpoint)
if err != nil {
log.Fatalln(err)
}
secure := strings.EqualFold(u.Scheme, "https")
transport, err := minio.DefaultTransport(secure)
if err != nil {
log.Fatalln(err)
}
if insecure {
// skip TLS verification
transport.TLSClientConfig.InsecureSkipVerify = true
}
s3Client, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: secure,
Transport: transport,
})
if err != nil {
log.Fatalln(err)
}
if debug {
s3Client.TraceOn(os.Stderr)
}
var buckets []string
if bucket != "" {
buckets = append(buckets, bucket)
} else {
bucketsInfo, err := s3Client.ListBuckets(context.Background())
if err != nil {
log.Fatalln(err)
}
for _, b := range bucketsInfo {
buckets = append(buckets, b.Name)
}
}
for _, bucket := range buckets {
opts := minio.ListObjectsOptions{
Recursive: true,
Prefix: prefix,
WithVersions: versions,
WithMetadata: true,
}
objFullPath := func(obj minio.ObjectInfo) (fpath string) {
fpath = path.Join(bucket, obj.Key)
if versions {
fpath += ":" + obj.VersionID
}
return
}
// List all objects from a bucket-name with a matching prefix.
for object := range s3Client.ListObjects(context.Background(), bucket, opts) {
if object.Err != nil {
log.Println("FAILED: LIST with error:", object.Err)
continue
}
if !minModTime.IsZero() && object.LastModified.Before(minModTime) {
continue
}
if object.IsDeleteMarker {
log.Println("SKIPPED: DELETE marker object:", objFullPath(object))
continue
}
if _, ok := object.UserMetadata["X-Amz-Server-Side-Encryption-Customer-Algorithm"]; ok {
log.Println("SKIPPED: Objects encrypted with SSE-C do not have md5sum as ETag:", objFullPath(object))
continue
}
if v, ok := object.UserMetadata["X-Amz-Server-Side-Encryption"]; ok && v == "aws:kms" {
log.Println("FAILED: encrypted with SSE-KMS do not have md5sum as ETag:", objFullPath(object))
continue
}
parts := 1
multipart := false
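// A multipart ETag has the form "<md5-of-part-md5s>-<parts>"; a single-part ETag is just the hex MD5 of the object.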
s := strings.Split(object.ETag, "-")
switch len(s) {
case 1:
// nothing to do
case 2:
if p, err := strconv.Atoi(s[1]); err == nil {
parts = p
} else {
log.Println("FAILED: ETAG of", objFullPath(object), "has a wrong format:", err)
continue
}
multipart = true
default:
log.Println("FAILED: Unexpected ETAG", object.ETag, "for object:", objFullPath(object))
continue
}
var partsMD5Sum [][]byte
var failedMD5 bool
for p := 1; p <= parts; p++ {
opts := minio.GetObjectOptions{
VersionID: object.VersionID,
PartNumber: p,
}
obj, err := s3Client.GetObject(context.Background(), bucket, object.Key, opts)
if err != nil {
log.Println("FAILED: GET", objFullPath(object), "=>", err)
failedMD5 = true
break
}
h := md5.New()
if _, err := io.Copy(h, obj); err != nil {
log.Println("FAILED: MD5 calculation error:", objFullPath(object), "=>", err)
failedMD5 = true
break
}
partsMD5Sum = append(partsMD5Sum, h.Sum(nil))
}
if failedMD5 {
log.Println("CORRUPTED object:", objFullPath(object))
continue
}
corrupted := false
if !multipart {
md5sum := fmt.Sprintf("%x", partsMD5Sum[0])
if md5sum != object.ETag {
corrupted = true
}
} else {
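// Recompute the multipart ETag: MD5 of the concatenated per-part MD5 sums, suffixed with "-<parts>".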
var totalMD5SumBytes []byte
for _, sum := range partsMD5Sum {
totalMD5SumBytes = append(totalMD5SumBytes, sum...)
}
s3MD5 := fmt.Sprintf("%x-%d", getMD5Sum(totalMD5SumBytes), parts)
if s3MD5 != object.ETag {
corrupted = true
}
}
if corrupted {
log.Println("CORRUPTED object:", objFullPath(object))
} else {
log.Println("INTACT object:", objFullPath(object))
}
}
}
}
<file_sep># Decommissioning
Decommissioning is a mechanism in MinIO to drain older pools (usually with old hardware) and migrate the content from such pools to newer pools (usually with better hardware). Decommissioning spreads the data across the remaining pools - for example, if you decommission `pool1`, all the data from `pool1` is spread across `pool2` and `pool3`.
## Features
- A pool being decommissioned still allows READ access to all its contents; newer WRITEs are automatically scheduled only to pools not in decommission status.
- All versioned buckets maintain the same order for "versions" for each object after being decommissioned to the other pools.
- A pool interrupted during the decommission process, such as for a cluster restart, resumes from where it left off.
## How to decommission a pool
```
λ mc admin decommission start alias/ http://minio{1...2}/data{1...4}
```
## Status of decommissioning a pool
### `status` without arguments lists all pools
```
λ mc admin decommission status alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬────────┐
│ ID  │ Pools                           │ Capacity                         │ Status │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Active │
│ 2nd │ http://minio{3...4}/data{1...4} │ 329 GiB (used) / 421 GiB (total) │ Active │
└─────┴─────────────────────────────────┴──────────────────────────────────┴────────┘
```
### Decommissioning status
```
λ mc admin decommission status alias/ http://minio{1...2}/data{1...4}
Decommissioning rate at 36 MiB/sec [4 TiB/50 TiB]
Started: 1 minute ago
```
Once it is **Complete**
```
λ mc admin decommission status alias/ http://minio{1...2}/data{1...4}
Decommission of pool http://minio{1...2}/data{1...4} is complete, you may now remove it from server command line
```
### A pool not under decommissioning will throw an error
```
λ mc admin decommission status alias/ http://minio{1...2}/data{1...4}
ERROR: This pool is not scheduled for decommissioning currently.
```
## Canceling a decommission
Stops an on-going decommission. This is mainly used in situations when the load may be too high and you want to schedule the decommission at a later point in time.
`mc admin decommission cancel` without an argument lists any on-going decommission in progress.
```
λ mc admin decommission cancel alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬──────────┐
│ ID  │ Pools                           │ Capacity                         │ Status   │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Draining │
└─────┴─────────────────────────────────┴──────────────────────────────────┴──────────┘
```
> NOTE: A canceled decommission will not make the pool active again, since there may already be a partial namespace on the other pools. To avoid this scenario, be absolutely sure to make decommissioning a planned, well-thought-out activity. It is not meant to be run on a daily basis.
```
λ mc admin decommission cancel alias/ http://minio{1...2}/data{1...4}
┌─────┬─────────────────────────────────┬──────────────────────────────────┬────────────────────┐
│ ID  │ Pools                           │ Capacity                         │ Status             │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Draining(Canceled) │
└─────┴─────────────────────────────────┴──────────────────────────────────┴────────────────────┘
```
If the decommission process fails for any reason, the status indicates failed.
```
λ mc admin decommission status alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬──────────────────┐
│ ID  │ Pools                           │ Capacity                         │ Status           │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Draining(Failed) │
│ 2nd │ http://minio{3...4}/data{1...4} │ 329 GiB (used) / 421 GiB (total) │ Active           │
└─────┴─────────────────────────────────┴──────────────────────────────────┴──────────────────┘
```
## Restart a canceled or failed decommission
```
λ mc admin decommission start alias/ http://minio{1...2}/data{1...4}
```
## When decommission is 'Complete'
Once decommission is complete, it will be indicated with a *Complete* status. *Complete* means that you can now safely remove the first pool argument from the MinIO command line.
```
λ mc admin decommission status alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬──────────┐
│ ID  │ Pools                           │ Capacity                         │ Status   │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Complete │
│ 2nd │ http://minio{3...4}/data{1...4} │ 329 GiB (used) / 421 GiB (total) │ Active   │
└─────┴─────────────────────────────────┴──────────────────────────────────┴──────────┘
```
- On baremetal setups, if you have `MINIO_VOLUMES="http://minio{1...2}/data{1...4} http://minio{3...4}/data{1...4}"`, you can remove the first argument `http://minio{1...2}/data{1...4}` to update your `MINIO_VOLUMES` setting, then restart all the servers in the setup in parallel using `systemctl restart minio`.
- On Kubernetes setups, the statefulset specification needs to be modified by changing the command line input for the MinIO container. Once the relevant changes are done, proceed to execute `kubectl apply -f statefulset.yaml`.
- On Operator based MinIO deployments, you need to modify the `tenant.yaml` specification and modify the `pools:` section from two entries to a single entry. After making relevant changes, proceed to execute `kubectl apply -f tenant.yaml`.
> Without a 'Complete' status, any 'Active' or 'Draining' pool(s) are not allowed to be removed once configured.
## NOTE
- Empty delete markers (such as for objects with no other successor versions) do not transition to the new pool to avoid creating empty metadata on the other pool(s). If you believe transitioning empty delete markers is required, open a GitHub issue.
## TODO
- A richer progress UI is not present at the moment; this will be addressed in subsequent releases. Currently, however, the RATE of data transfer and usage increase is displayed via `mc`.
- Pooled setups with transitioned (tiered) objects are not currently supported; attempting to decommission buckets with ILM Transition will be rejected by the server. This will be supported in future releases.
- Embedded Console UI does not support Decommissioning through the UI yet. This will be supported in future releases.
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"bytes"
"io"
)
// RequestRecorder - records the data read from a given io.Reader
type RequestRecorder struct {
// Data source to record
io.Reader
// Response body should be logged
LogBody bool
// internal recording buffer
buf bytes.Buffer
// total bytes read including header size
bytesRead int
}
// Close is a no operation closer
func (r *RequestRecorder) Close() error {
// no-op
return nil
}
// Read reads from the internal reader, counts the bytes read, and optionally saves the body in memory
func (r *RequestRecorder) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.bytesRead += n
if r.LogBody {
r.buf.Write(p[:n])
}
return n, err
}
// Size returns the body size of the currently read object
func (r *RequestRecorder) Size() int {
return r.bytesRead
}
// Data returns the bytes that were recorded.
func (r *RequestRecorder) Data() []byte {
// If body logging is enabled then we return the actual body
if r.LogBody {
return r.buf.Bytes()
}
// ... otherwise we return <BLOB> placeholder
return blobBody
}
<file_sep># Object Lambda
MinIO's Object Lambda implementation allows for transforming your data to serve unique data format requirements for each application. For example, a dataset created by an ecommerce application might include personally identifiable information (PII). When the same data is processed for analytics, PII should be redacted. However, if the same dataset is used for a marketing campaign, you might need to enrich the data with additional details, such as information from the customer loyalty database.
MinIO's Object Lambda enables application developers to process data retrieved from MinIO before returning it to an application. You can register a Lambda function target on MinIO; once successfully registered, it can be used to transform the data for application GET requests on demand.
This document focuses on showing a working example of how to use Object Lambda with MinIO. You must have [MinIO deployed in your environment](https://min.io/docs/minio/linux/operations/installation.html) before you can start using external lambda functions. You must also install Python version 3.8 or later for the lambda handlers to work.
## Example Lambda handler
Install the necessary dependencies.
```sh
pip install flask requests
```
The following is an example lambda handler.
```py
from flask import Flask, request, abort, make_response
import requests
app = Flask(__name__)
@app.route('/', methods=['POST'])
def get_webhook():
if request.method == 'POST':
# obtain the request event from the 'POST' call
event = request.json
object_context = event["getObjectContext"]
# Get the presigned URL to fetch the requested
# original object from MinIO
s3_url = object_context["inputS3Url"]
# Extract the route and request token from the input context
request_route = object_context["outputRoute"]
request_token = object_context["outputToken"]
# Get the original S3 object using the presigned URL
r = requests.get(s3_url)
original_object = r.content.decode('utf-8')
# Transform all text in the original object to uppercase
# You can replace it with your custom code based on your use case
transformed_object = original_object.upper()
# Write object back to S3 Object Lambda
# response sends the transformed data
# back to MinIO and then to the user
resp = make_response(transformed_object, 200)
resp.headers['x-amz-request-route'] = request_route
resp.headers['x-amz-request-token'] = request_token
return resp
else:
abort(400)
if __name__ == '__main__':
app.run()
```
When you're writing a Lambda function for use with MinIO, the function is based on the event context that MinIO provides to it. The event context provides information about the request being made, along with the relevant parameters. The fields used to create the Lambda function are as follows:
The `getObjectContext` field contains the input and output details for connections to MinIO. It has the following fields (a minimal Go sketch of a handler that uses them is shown after the list):
- `inputS3Url` - A presigned URL that the Lambda function can use to download the original object. By using a presigned URL, the Lambda function doesn't need MinIO credentials to retrieve the original object, allowing it to focus on transforming the object instead of securing credentials.
- `outputRoute` - A routing token that is added to the response headers when the Lambda function returns the transformed object. This is used by MinIO to further verify the incoming response validity.
- `outputToken` - A token added to the response headers when the Lambda function returns the transformed object. This is used by MinIO to verify the incoming response validity.
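For comparison, here is a minimal Go sketch of an equivalent handler. It assumes only the event layout described above (`getObjectContext` with `inputS3Url`, `outputRoute` and `outputToken`), uses just the standard library, and is not a production-ready implementation:
```go
package main

import (
	"encoding/json"
	"io"
	"log"
	"net/http"
	"strings"
)

// lambdaEvent models only the parts of the MinIO Object Lambda event that
// this example needs.
type lambdaEvent struct {
	GetObjectContext struct {
		InputS3URL  string `json:"inputS3Url"`
		OutputRoute string `json:"outputRoute"`
		OutputToken string `json:"outputToken"`
	} `json:"getObjectContext"`
}

func handler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "only POST is supported", http.StatusBadRequest)
		return
	}
	var event lambdaEvent
	if err := json.NewDecoder(r.Body).Decode(&event); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Fetch the original object via the presigned URL provided by MinIO.
	resp, err := http.Get(event.GetObjectContext.InputS3URL)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	original, err := io.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	// Transform the object; replace this with your own logic.
	transformed := strings.ToUpper(string(original))
	// The route and token headers are required so MinIO can validate the response.
	w.Header().Set("x-amz-request-route", event.GetObjectContext.OutputRoute)
	w.Header().Set("x-amz-request-token", event.GetObjectContext.OutputToken)
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(transformed))
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":5000", nil))
}
```
If you use this sketch, run it with `go run` and register it exactly like the Python handler below, since MinIO only needs an HTTP endpoint to call.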
Let's start the Python lambda handler.
```
python lambda_handler.py
* Serving Flask app 'webhook'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on http://127.0.0.1:5000
Press CTRL+C to quit
```
## Start MinIO with Lambda target
Register the Lambda function target with MinIO. Here we name our target `function`, but you may use any other friendly name of your choice.
```
MINIO_LAMBDA_WEBHOOK_ENABLE_function=on MINIO_LAMBDA_WEBHOOK_ENDPOINT_function=http://localhost:5000 minio server /data &
...
...
MinIO Object Storage Server
Copyright: 2015-2023 MinIO, Inc.
License: GNU AGPLv3 <https://www.gnu.org/licenses/agpl-3.0.html>
Version: DEVELOPMENT.2023-02-05T05-17-27Z (go1.19.4 linux/amd64)
...
...
Object Lambda ARNs: arn:minio:s3-object-lambda::function:webhook
```
### Lambda Target with Auth Token
If your lambda target expects an authorization token, you can enable it per function target as follows
```
MINIO_LAMBDA_WEBHOOK_ENABLE_function=on MINIO_LAMBDA_WEBHOOK_ENDPOINT_function=http://localhost:5000 MINIO_LAMBDA_WEBHOOK_AUTH_TOKEN="<PASSWORD>" minio server /data &
```
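On the handler side, one way to use this token is to compare it against the value sent with each request. The sketch below is only an assumption-laden example: it presumes MinIO forwards the configured token in the `Authorization` header of the webhook POST, and the `LAMBDA_AUTH_TOKEN` environment variable name is made up for illustration.
```py
import os
from flask import Flask, request, abort

app = Flask(__name__)

# Hypothetical env var holding the same value configured via
# MINIO_LAMBDA_WEBHOOK_AUTH_TOKEN_function on the MinIO side.
EXPECTED_TOKEN = os.environ.get("LAMBDA_AUTH_TOKEN", "")

@app.before_request
def check_auth_token():
    # Assumption: MinIO forwards the configured auth token in the
    # Authorization header of the webhook POST. Reject anything else.
    if EXPECTED_TOKEN and request.headers.get("Authorization") != EXPECTED_TOKEN:
        abort(401)
```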
### Lambda Target with mTLS authentication
If your lambda target expects mTLS client authentication, you can enable it per function target as follows
```
MINIO_LAMBDA_WEBHOOK_ENABLE_function=on MINIO_LAMBDA_WEBHOOK_ENDPOINT_function=http://localhost:5000 MINIO_LAMBDA_WEBHOOK_CLIENT_CERT=client.crt MINIO_LAMBDA_WEBHOOK_CLIENT_KEY=client.key minio server /data &
```
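For local testing of an mTLS setup, the handler's development server can be made to require a client certificate. The snippet below is only a sketch under assumptions: `server.crt`, `server.key`, and `ca.crt` are placeholder file names, and the lambda endpoint configured on the MinIO side would then need to use `https://` rather than `http://`.
```py
import ssl
from flask import Flask

app = Flask(__name__)

# Build a TLS context that requires and verifies a client certificate, so the
# cert/key pair configured via MINIO_LAMBDA_WEBHOOK_CLIENT_CERT/KEY can be
# checked against a CA bundle. All file paths here are placeholders.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain("server.crt", "server.key")
ctx.load_verify_locations("ca.crt")
ctx.verify_mode = ssl.CERT_REQUIRED

if __name__ == "__main__":
    app.run(port=5000, ssl_context=ctx)
```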
## Create a bucket and upload some data
Create a bucket named `functionbucket`
```
mc alias set myminio/ http://localhost:9000 minioadmin minioadmin
mc mb myminio/functionbucket
```
Create a file `testobject` with some test data that will be transformed
```
cat > testobject << EOF
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
EOF
```
Upload this object to the bucket via `mc cp`
```
mc cp testobject myminio/functionbucket/
```
## Invoke Lambda transformation via PresignedGET
The following example shows how you can use [`minio-go` PresignedGetObject](https://min.io/docs/minio/linux/developers/go/API.html#presignedgetobject-ctx-context-context-bucketname-objectname-string-expiry-time-duration-reqparams-url-values-url-url-error)
```go
package main
import (
"context"
"log"
"net/url"
"time"
"fmt"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
func main() {
s3Client, err := minio.New("localhost:9000", &minio.Options{
Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
Secure: false,
})
if err != nil {
log.Fatalln(err)
}
// Set lambda function target via `lambdaArn`
reqParams := make(url.Values)
reqParams.Set("lambdaArn", "arn:minio:s3-object-lambda::function:webhook")
// Generate presigned GET url with lambda function
presignedURL, err := s3Client.PresignedGetObject(context.Background(), "functionbucket", "testobject", time.Duration(1000)*time.Second, reqParams)
if err != nil {
log.Fatalln(err)
}
fmt.Println(presignedURL)
}
```
Use the Presigned URL via `curl` to receive the transformed object.
```
curl -v $(go run presigned.go)
...
...
> GET /functionbucket/testobject?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minioadmin%2F20230205%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230205T173023Z&X-Amz-Expires=1000&X-Amz-SignedHeaders=host&lambdaArn=arn%3Aminio%3As3-object-lambda%3A%3Atoupper%3Awebhook&X-Amz-Signature=d7e343f0da9d4fa2bc822c12ad2f54300ff16796a1edaa6d31f1313c8e94d5b2 HTTP/1.1
> Host: localhost:9000
> User-Agent: curl/7.81.0
> Accept: */*
>
MINIO IS A HIGH PERFORMANCE OBJECT STORAGE RELEASED UNDER GNU AFFERO GENERAL PUBLIC LICENSE V3.0. IT IS API COMPATIBLE WITH AMAZON S3 CLOUD STORAGE SERVICE. USE MINIO TO BUILD HIGH PERFORMANCE INFRASTRUCTURE FOR MACHINE LEARNING, ANALYTICS AND APPLICATION DATA WORKLOADS.
```
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"net/http"
"os"
"testing"
"time"
jwtgo "github.com/golang-jwt/jwt/v4"
xjwt "github.com/minio/minio/internal/jwt"
)
func getTokenString(accessKey, secretKey string) (string, error) {
claims := xjwt.NewMapClaims()
claims.SetExpiry(UTCNow().Add(defaultJWTExpiry))
claims.SetAccessKey(accessKey)
token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
return token.SignedString([]byte(secretKey))
}
// Tests web request authenticator.
func TestWebRequestAuthenticate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
t.Fatal(err)
}
creds := globalActiveCred
token, err := getTokenString(creds.AccessKey, creds.SecretKey)
if err != nil {
t.Fatalf("unable get token %s", err)
}
testCases := []struct {
req *http.Request
expectedErr error
}{
// Set valid authorization header.
{
req: &http.Request{
Header: http.Header{
"Authorization": []string{token},
},
},
expectedErr: nil,
},
// No authorization header.
{
req: &http.Request{
Header: http.Header{},
},
expectedErr: errNoAuthToken,
},
// Invalid authorization token.
{
req: &http.Request{
Header: http.Header{
"Authorization": []string{"invalid-token"},
},
},
expectedErr: errAuthentication,
},
}
for i, testCase := range testCases {
_, _, _, gotErr := metricsRequestAuthenticate(testCase.req)
if testCase.expectedErr != gotErr {
t.Errorf("Test %d, expected err %s, got %s", i+1, testCase.expectedErr, gotErr)
}
}
}
func BenchmarkParseJWTStandardClaims(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
b.Fatal(err)
}
creds := globalActiveCred
token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "")
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err = xjwt.ParseWithStandardClaims(token, xjwt.NewStandardClaims(), []byte(creds.SecretKey))
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkParseJWTMapClaims(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
b.Fatal(err)
}
creds := globalActiveCred
token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "")
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err = xjwt.ParseWithClaims(token, xjwt.NewMapClaims(), func(*xjwt.MapClaims) ([]byte, error) {
return []byte(creds.SecretKey), nil
})
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkAuthenticateNode(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
b.Fatal(err)
}
creds := globalActiveCred
b.Run("uncached", func(b *testing.B) {
fn := authenticateNode
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
fn(creds.AccessKey, creds.SecretKey, "aud")
}
})
b.Run("cached", func(b *testing.B) {
fn := cachedAuthenticateNode(time.Second)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
fn(creds.AccessKey, creds.SecretKey, "aud")
}
})
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/binary"
"encoding/hex"
"fmt"
"hash/crc32"
"io"
"github.com/secure-io/sio-go"
)
func extractInspectV1(keyHex string, r io.Reader, w io.Writer) error {
id, err := hex.DecodeString(keyHex[:8])
if err != nil {
return err
}
key, err := hex.DecodeString(keyHex[8:])
if err != nil {
return err
}
// Verify that CRC is ok.
want := binary.LittleEndian.Uint32(id)
got := crc32.ChecksumIEEE(key)
if want != got {
return fmt.Errorf("Invalid key checksum, want %x, got %x", want, got)
}
stream, err := sio.AES_256_GCM.Stream(key)
if err != nil {
return err
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
nonce := make([]byte, stream.NonceSize())
encr := stream.DecryptReader(r, nonce, nil)
_, err = io.Copy(w, encr)
return err
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"runtime"
"sync"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
bucketsse "github.com/minio/minio/internal/bucket/encryption"
"github.com/minio/minio/internal/bucket/lifecycle"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
"github.com/minio/pkg/sync/errgroup"
)
// BucketMetadataSys captures all bucket metadata for a given cluster.
type BucketMetadataSys struct {
objAPI ObjectLayer
sync.RWMutex
metadataMap map[string]BucketMetadata
}
// Count returns number of bucket metadata map entries.
func (sys *BucketMetadataSys) Count() int {
sys.RLock()
defer sys.RUnlock()
return len(sys.metadataMap)
}
// Remove bucket metadata from memory.
func (sys *BucketMetadataSys) Remove(buckets ...string) {
sys.Lock()
for _, bucket := range buckets {
delete(sys.metadataMap, bucket)
globalBucketMonitor.DeleteBucket(bucket)
}
sys.Unlock()
}
// RemoveStaleBuckets removes all stale buckets in memory that are not on disk.
func (sys *BucketMetadataSys) RemoveStaleBuckets(diskBuckets set.StringSet) {
sys.Lock()
defer sys.Unlock()
for bucket := range sys.metadataMap {
if diskBuckets.Contains(bucket) {
continue
} // doesn't exist on disk remove from memory.
delete(sys.metadataMap, bucket)
globalBucketMonitor.DeleteBucket(bucket)
}
}
// Set - sets a new metadata in-memory.
// Only a shallow copy is saved and fields with references
// cannot be modified without causing a race condition,
// so they should be replaced atomically and not appended to, etc.
// Data is not persisted to disk.
func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
if !isMinioMetaBucketName(bucket) {
sys.Lock()
sys.metadataMap[bucket] = meta
sys.Unlock()
}
}
func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string, configFile string, configData []byte, parse bool) (updatedAt time.Time, err error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return updatedAt, errServerNotInitialized
}
if isMinioMetaBucketName(bucket) {
return updatedAt, errInvalidArgument
}
meta, err := loadBucketMetadataParse(ctx, objAPI, bucket, parse)
if err != nil {
if !globalIsErasure && !globalIsDistErasure && errors.Is(err, errVolumeNotFound) {
// Only single drive mode needs this fallback.
meta = newBucketMetadata(bucket)
} else {
return updatedAt, err
}
}
updatedAt = UTCNow()
switch configFile {
case bucketPolicyConfig:
meta.PolicyConfigJSON = configData
meta.PolicyConfigUpdatedAt = updatedAt
case bucketNotificationConfig:
meta.NotificationConfigXML = configData
case bucketLifecycleConfig:
meta.LifecycleConfigXML = configData
meta.LifecycleConfigUpdatedAt = updatedAt
case bucketSSEConfig:
meta.EncryptionConfigXML = configData
meta.EncryptionConfigUpdatedAt = updatedAt
case bucketTaggingConfig:
meta.TaggingConfigXML = configData
meta.TaggingConfigUpdatedAt = updatedAt
case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData
meta.QuotaConfigUpdatedAt = updatedAt
case objectLockConfig:
meta.ObjectLockConfigXML = configData
meta.ObjectLockConfigUpdatedAt = updatedAt
case bucketVersioningConfig:
meta.VersioningConfigXML = configData
meta.VersioningConfigUpdatedAt = updatedAt
case bucketReplicationConfig:
meta.ReplicationConfigXML = configData
meta.ReplicationConfigUpdatedAt = updatedAt
case bucketTargetsFile:
meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(ctx, meta.Name, configData, kms.Context{
bucket: meta.Name,
bucketTargetsFile: bucketTargetsFile,
})
if err != nil {
return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err)
}
default:
return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}
if err := meta.Save(ctx, objAPI); err != nil {
return updatedAt, err
}
sys.Set(bucket, meta)
globalNotificationSys.LoadBucketMetadata(bgContext(ctx), bucket) // Do not use caller context here
return updatedAt, nil
}
// Delete delete the bucket metadata for the specified bucket.
// must be used by all callers instead of using Update() with nil configData.
func (sys *BucketMetadataSys) Delete(ctx context.Context, bucket string, configFile string) (updatedAt time.Time, err error) {
return sys.updateAndParse(ctx, bucket, configFile, nil, false)
}
// Update update bucket metadata for the specified bucket.
// The configData data should not be modified after being sent here.
func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configFile string, configData []byte) (updatedAt time.Time, err error) {
return sys.updateAndParse(ctx, bucket, configFile, configData, true)
}
// Get metadata for a bucket.
// If no metadata exists errConfigNotFound is returned and a new metadata is returned.
// Only a shallow copy is returned, so referenced data should not be modified,
// but can be replaced atomically.
//
// This function should only be used with
// - GetBucketInfo
// - ListBuckets
// For all other bucket specific metadata, use the relevant
// calls implemented specifically for each of those features.
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
if isMinioMetaBucketName(bucket) {
return newBucketMetadata(bucket), errConfigNotFound
}
sys.RLock()
defer sys.RUnlock()
meta, ok := sys.metadataMap[bucket]
if !ok {
return newBucketMetadata(bucket), errConfigNotFound
}
return meta, nil
}
// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, meta.Created, nil
}
return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, time.Time{}, err
}
return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
}
// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.taggingConfig == nil {
return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket}
}
return meta.taggingConfig, meta.TaggingConfigUpdatedAt, nil
}
// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.objectLockConfig == nil {
return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket}
}
return meta.objectLockConfig, meta.ObjectLockConfigUpdatedAt, nil
}
// GetLifecycleConfig returns configured lifecycle config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Lifecycle, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketLifecycleNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.lifecycleConfig == nil {
return nil, time.Time{}, BucketLifecycleNotFound{Bucket: bucket}
}
return meta.lifecycleConfig, meta.LifecycleConfigUpdatedAt, nil
}
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
return nil, err
}
return meta.notificationConfig, nil
}
// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.sseConfig == nil {
return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket}
}
return meta.sseConfig, meta.EncryptionConfigUpdatedAt, nil
}
// CreatedAt returns the time of creation of bucket
func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
return time.Time{}, err
}
return meta.Created.UTC(), nil
}
// GetPolicyConfig returns configured bucket policy
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, time.Time, error) {
meta, _, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.policyConfig == nil {
return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
}
return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
}
// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) {
meta, _, err := sys.GetConfig(ctx, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketQuotaConfigNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil
}
// GetReplicationConfig returns configured bucket replication config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, time.Time, error) {
meta, reloaded, err := sys.GetConfig(ctx, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
}
return nil, time.Time{}, err
}
if meta.replicationConfig == nil {
return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket}
}
if reloaded {
globalBucketTargetSys.set(BucketInfo{
Name: bucket,
}, meta)
}
return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil
}
// GetBucketTargetsConfig returns configured bucket targets for this bucket
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.BucketTargets, error) {
meta, reloaded, err := sys.GetConfig(GlobalContext, bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}
return nil, err
}
if meta.bucketTargetConfig == nil {
return nil, BucketRemoteTargetNotFound{Bucket: bucket}
}
if reloaded {
globalBucketTargetSys.set(BucketInfo{
Name: bucket,
}, meta)
}
return meta.bucketTargetConfig, nil
}
// GetConfigFromDisk read bucket metadata config from disk.
func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
}
if isMinioMetaBucketName(bucket) {
return newBucketMetadata(bucket), errInvalidArgument
}
return loadBucketMetadata(ctx, objAPI, bucket)
}
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
// reloaded will be true if metadata refreshed from disk
func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (meta BucketMetadata, reloaded bool, err error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return newBucketMetadata(bucket), reloaded, errServerNotInitialized
}
if isMinioMetaBucketName(bucket) {
return newBucketMetadata(bucket), reloaded, errInvalidArgument
}
sys.RLock()
meta, ok := sys.metadataMap[bucket]
sys.RUnlock()
if ok {
return meta, reloaded, nil
}
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
return meta, reloaded, err
}
sys.Lock()
sys.metadataMap[bucket] = meta
sys.Unlock()
return meta, true, nil
}
// Init - initializes bucket metadata system for all buckets.
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
sys.objAPI = objAPI
// Load bucket metadata sys.
sys.init(ctx, buckets)
return nil
}
func (sys *BucketMetadataSys) loadBucketMetadata(ctx context.Context, bucket BucketInfo) error {
meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket.Name)
if err != nil {
return err
}
sys.Lock()
sys.metadataMap[bucket.Name] = meta
sys.Unlock()
return nil
}
// concurrently load bucket metadata to speed up loading bucket metadata.
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo) {
g := errgroup.WithNErrs(len(buckets))
bucketMetas := make([]BucketMetadata, len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
_, _ = sys.objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
// Ensure heal opts for bucket metadata be deep healed all the time.
ScanMode: madmin.HealDeepScan,
Recreate: true,
})
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index].Name)
if err != nil {
return err
}
bucketMetas[index] = meta
return nil
}, index)
}
errs := g.Wait()
for _, err := range errs {
if err != nil {
logger.LogIf(ctx, err)
}
}
// Hold lock here to update in-memory map at once,
// instead of serializing the Go routines.
sys.Lock()
for i, meta := range bucketMetas {
if errs[i] != nil {
continue
}
sys.metadataMap[buckets[i].Name] = meta
}
sys.Unlock()
for i, meta := range bucketMetas {
if errs[i] != nil {
continue
}
globalEventNotifier.set(buckets[i], meta) // set notification targets
globalBucketTargetSys.set(buckets[i], meta) // set remote replication targets
}
}
func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
const bucketMetadataRefresh = 15 * time.Minute
t := time.NewTimer(bucketMetadataRefresh)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Handle if we have some buckets in-memory those are stale.
// first delete them and then replace the newer state()
// from disk.
diskBuckets := set.CreateStringSet()
for _, bucket := range buckets {
diskBuckets.Add(bucket.Name)
}
sys.RemoveStaleBuckets(diskBuckets)
for _, bucket := range buckets {
err := sys.loadBucketMetadata(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Check if there is a spare procs, wait 100ms instead
waitForLowIO(runtime.GOMAXPROCS(0), 100*time.Millisecond, currentHTTPIO)
}
t.Reset(bucketMetadataRefresh)
}
}
}
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
for {
if len(buckets) < count {
sys.concurrentLoad(ctx, buckets)
break
}
sys.concurrentLoad(ctx, buckets[:count])
buckets = buckets[count:]
}
if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx)
}
}
// Reset the state of the BucketMetadataSys.
func (sys *BucketMetadataSys) Reset() {
sys.Lock()
for k := range sys.metadataMap {
delete(sys.metadataMap, k)
}
sys.Unlock()
}
// NewBucketMetadataSys - creates new policy system.
func NewBucketMetadataSys() *BucketMetadataSys {
return &BucketMetadataSys{
metadataMap: make(map[string]BucketMetadata),
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"net/http"
"os"
"runtime"
"runtime/debug"
"strings"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
addr := globalLocalNodeName
if r != nil {
addr = r.Host
}
if globalIsDistErasure {
addr = globalLocalNodeName
}
network := make(map[string]string)
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
nodeName = addr
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
}
}
}
}
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
gcStats := debug.GCStats{
// If stats.PauseQuantiles is non-empty, ReadGCStats fills
// it with quantiles summarizing the distribution of pause time.
// For example, if len(stats.PauseQuantiles) is 5, it will be
// filled with the minimum, 25%, 50%, 75%, and maximum pause times.
PauseQuantiles: make([]time.Duration, 5),
}
debug.ReadGCStats(&gcStats)
// Truncate GC stats to max 5 entries.
if len(gcStats.PauseEnd) > 5 {
gcStats.PauseEnd = gcStats.PauseEnd[len(gcStats.PauseEnd)-5:]
}
if len(gcStats.Pause) > 5 {
gcStats.Pause = gcStats.Pause[len(gcStats.Pause)-5:]
}
props := madmin.ServerProperties{
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
MemStats: madmin.MemStats{
Alloc: memstats.Alloc,
TotalAlloc: memstats.TotalAlloc,
Mallocs: memstats.Mallocs,
Frees: memstats.Frees,
HeapAlloc: memstats.HeapAlloc,
},
GoMaxProcs: runtime.GOMAXPROCS(0),
NumCPU: runtime.NumCPU(),
RuntimeVersion: runtime.Version(),
GCStats: &madmin.GCStats{
LastGC: gcStats.LastGC,
NumGC: gcStats.NumGC,
PauseTotal: gcStats.PauseTotal,
Pause: gcStats.Pause,
PauseEnd: gcStats.PauseEnd,
},
MinioEnvVars: make(map[string]string, 10),
}
sensitive := map[string]struct{}{
config.EnvAccessKey: {},
config.EnvSecretKey: {},
config.EnvRootUser: {},
config.EnvRootPassword: {},
config.EnvMinIOSubnetAPIKey: {},
kms.EnvKMSSecretKey: {},
}
for _, v := range os.Environ() {
if !strings.HasPrefix(v, "MINIO") && !strings.HasPrefix(v, "_MINIO") {
continue
}
split := strings.SplitN(v, "=", 2)
key := split[0]
value := ""
if len(split) > 1 {
value = split[1]
}
// Do not send sensitive creds.
if _, ok := sensitive[key]; ok || strings.Contains(strings.ToLower(key), "password") || strings.HasSuffix(strings.ToLower(key), "key") {
props.MinioEnvVars[key] = "*** EXISTS, REDACTED ***"
continue
}
props.MinioEnvVars[key] = value
}
objLayer := newObjectLayerFn()
if objLayer != nil {
storageInfo := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
} else {
props.State = string(madmin.ItemInitializing)
props.Disks = getOfflineDisks("", globalEndpoints)
}
return props
}
<file_sep># AGPLv3 Compliance
We have designed MinIO as Open Source software for the Open Source software community. This requires applications to consider whether their usage of MinIO is in compliance with the GNU AGPLv3 [license](https://github.com/minio/minio/blob/master/LICENSE).
MinIO cannot make the determination as to whether your application's usage of MinIO is in compliance with the AGPLv3 license requirements. You should instead rely on your own legal counsel or licensing specialists to audit and ensure your application is in compliance with the licenses of MinIO and all other open-source projects with which your application integrates or interacts. We understand that AGPLv3 licensing is complex and nuanced. It is for that reason we strongly encourage using experts in licensing to make any such determinations around compliance instead of relying on apocryphal or anecdotal advice.
[MinIO Commercial Licensing](https://min.io/pricing) is the best option for applications that trigger AGPLv3 obligations (e.g. open sourcing your application). Applications using MinIO - or any other OSS-licensed code - without validating their usage do so at their own risk.
<file_sep># Introduction
This document outlines how to make hotfix binaries and containers for MinIO. The main focus of this article is how to backport patches to a specific branch and finally build the binaries and containers.
## Pre-pre requisite
- A working knowledge of MinIO codebase and its various components.
- A working knowledge of AWS S3 API behaviors and corner cases.
## Pre-requisite for backporting any fixes
Fixes that are allowed a backport must satisfy any of the following criteria:
- A fix must not be a feature, for example.
```
commit faf013ec84051b92ae0f420a658b8d35bb7bb000
Author: <NAME> <<EMAIL>>
Date: Thu Nov 18 12:15:22 2021 -0800
Improve performance on multiple versions (#13573)
```
- A fix must be a valid fix that was reproduced and seen in a customer environment, for example.
```
commit 886262e58af77ebc7c836ef587c08544e9a0c271
Author: Harshavardhana <<EMAIL>>
Date: Wed Nov 17 15:49:12 2021 -0800
heal legacy objects when versioning is enabled after upgrade (#13671)
```
- A security fix must be backported if a customer is affected by it; we have a mechanism in SUBNET to send out notifications to affected customers in such situations. This is a mandatory requirement.
```
commit 99bf4d0c429f04dbd013ba98840d07b759ae1702 (tag: RELEASE.2019-06-15T23-07-18Z)
Author: Harshavardhana <<EMAIL>>
Date: Sat Jun 15 11:27:17 2019 -0700
[security] Match ${aws:username} exactly instead of prefix match (#7791)
This PR fixes a security issue where an IAM user based
on his policy is granted more privileges than restricted
by the users IAM policy.
This is due to an issue of prefix based Matcher() function
which was incorrectly matching prefix based on resource
prefixes instead of exact match.
```
- There is always the possibility of a fix that is new; in that case, the developer must make sure that the fix is sent upstream, reviewed, and merged to the master branch.
## Creating a hotfix branch
MinIO customers are allowed LTS on any release they choose to standardize on. Production setups seldom change and require maintenance. Hotfix branches are such maintenance branches, allowing customers to operate a production cluster without drastic changes to their deployment.
## Backporting a fix
The developer is advised to clone the MinIO source and check out the MinIO release tag the customer is currently on.
```
λ git checkout RELEASE.2021-04-22T15-44-28Z
```
Create a branch and proceed to push the branch **upstream**
> (upstream here points to [email protected]:minio/minio.git)
```
λ git branch -m RELEASE.2021-04-22T15-44-28Z.hotfix
λ git push -u upstream RELEASE.2021-04-22T15-44-28Z.hotfix
```
Pick the relevant commit-id, say for example this commit-id from the master branch
```
commit 4f3317effea38c203c358af9cb5ce3c0e4173976
Author: Klaus Post <<EMAIL>>
Date: Mon Nov 8 08:41:27 2021 -0800
Close stream on panic (#13605)
Always close streamHTTPResponse on panic on main thread to avoid
write/flush after response handler has returned.
```
```
λ git cherry-pick 4f3317effea38c203c358af9cb5ce3c0e4173976
```
*A self-contained **patch** usually applies cleanly on the hotfix branch during backports. There are situations, however, where this may lead to conflicts and the patch will not apply cleanly. Conflicts might be trivial and easily resolved; when conflicts seem non-trivial or touch a part of the code-base the developer is not confident about, reach out to #hack on the MinIOHQ Slack channel for additional clarity. Hasty changes must be avoided; minor fixes and logs may be added to hotfix branches, but this should not become standard practice.*
Once the **patch** is successfully applied, the developer must validate the backported fix by running the following tests locally.
Unit tests
```
λ make test
```
Verify different type of MinIO deployments work
```
λ make verify
```
Verify if healing and replacing a drive works
```
λ make verify-healing
```
At this point the backport is ready to be submitted as a pull request to the relevant branch. A pull request is recommended to ensure [mint](http://github.com/minio/mint) tests are validated. The pull request also ensures code reviews for the backports in case of any unforeseen regressions.
### Building a hotfix binary and container
To add a hotfix tag to the binary version and embed the relevant `commit-id`, the following build helpers are available
#### Builds the hotfix binary and uploads to https://dl.min.io
```
λ CRED_DIR=/media/builder/minio make hotfix-push
```
#### Builds the hotfix container and pushes to docker.io/minio/minio
```
λ CRED_DIR=/media/builder/minio make docker-hotfix-push
```
Once this has been provided to the customer, the relevant binary will be uploaded securely from our *release server* directly to <https://dl.minio.io/server/minio/hotfixes/archive/>
<file_sep>#!/usr/bin/env bash
if [ -n "$TEST_DEBUG" ]; then
set -x
fi
trap 'catch $LINENO' ERR
# shellcheck disable=SC2120
catch() {
if [ $# -ne 0 ]; then
echo "error on line $1"
for site in sitea siteb sitec; do
echo "$site server logs ========="
cat "/tmp/${site}_1.log"
echo "==========================="
cat "/tmp/${site}_2.log"
done
fi
echo "Cleaning up instances of MinIO"
pkill minio
pkill -9 minio
rm -rf /tmp/multisitea
rm -rf /tmp/multisiteb
rm -rf /tmp/multisitec
}
catch
set -e
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:<KEY>
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
go build ./docs/debugging/s3-check-md5/
wget -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
wget -O mc.RELEASE.2021-03-12T03-36-59Z https://dl.minio.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2021-03-12T03-36-59Z &&
chmod +x mc.RELEASE.2021-03-12T03-36-59Z
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
minio server --address 127.0.0.1:9005 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_1.log 2>&1 &
minio server --address 127.0.0.1:9006 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_2.log 2>&1 &
sleep 30
export MC_HOST_sitea=http://minio:[email protected]:9001
export MC_HOST_siteb=http://minio:[email protected]:9004
export MC_HOST_sitec=http://minio:[email protected]:9006
./mc mb sitea/bucket
./mc version enable sitea/bucket
./mc mb -l sitea/olockbucket
./mc mb siteb/bucket/
./mc version enable siteb/bucket/
./mc mb -l siteb/olockbucket/
./mc mb sitec/bucket/
./mc version enable sitec/bucket/
./mc mb -l sitec/olockbucket
echo "adding replication rule for a -> b : ${remote_arn}"
sleep 1
./mc replicate add sitea/bucket/ \
--remote-bucket http://minio:[email protected]:9004/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync"
sleep 1
echo "adding replication rule for b -> a : ${remote_arn}"
./mc replicate add siteb/bucket/ \
--remote-bucket http://minio:[email protected]:9001/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync"
sleep 1
echo "adding replication rule for a -> c : ${remote_arn}"
./mc replicate add sitea/bucket/ \
--remote-bucket http://minio:[email protected]:9006/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 2
sleep 1
echo "adding replication rule for c -> a : ${remote_arn}"
./mc replicate add sitec/bucket/ \
--remote-bucket http://minio:[email protected]:9001/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 2
sleep 1
echo "adding replication rule for b -> c : ${remote_arn}"
./mc replicate add siteb/bucket/ \
--remote-bucket http://minio:[email protected]:9006/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 3
sleep 1
echo "adding replication rule for c -> b : ${remote_arn}"
./mc replicate add sitec/bucket/ \
--remote-bucket http://minio:[email protected]:9004/bucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 3
sleep 1
echo "adding replication rule for olockbucket a -> b : ${remote_arn}"
./mc replicate add sitea/olockbucket/ \
--remote-bucket http://minio:[email protected]:9004/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync"
sleep 1
echo "adding replication rule for olockbucket b -> a : ${remote_arn}"
./mc replicate add siteb/olockbucket/ \
--remote-bucket http://minio:[email protected]:9001/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync"
sleep 1
echo "adding replication rule for olockbucket a -> c : ${remote_arn}"
./mc replicate add sitea/olockbucket/ \
--remote-bucket http://minio:[email protected]:9006/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 2
sleep 1
echo "adding replication rule for olockbucket c -> a : ${remote_arn}"
./mc replicate add sitec/olockbucket/ \
--remote-bucket http://minio:[email protected]:9001/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 2
sleep 1
echo "adding replication rule for olockbucket b -> c : ${remote_arn}"
./mc replicate add siteb/olockbucket/ \
--remote-bucket http://minio:[email protected]:9006/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 3
sleep 1
echo "adding replication rule for olockbucket c -> b : ${remote_arn}"
./mc replicate add sitec/olockbucket/ \
--remote-bucket http://minio:[email protected]:9004/olockbucket \
--replicate "existing-objects,delete,delete-marker,replica-metadata-sync" --priority 3
sleep 1
echo "Set default governance retention 30d"
./mc retention set --default governance 30d sitea/olockbucket
echo "Copying data to source sitea/bucket"
./mc cp --encrypt "sitea/" --quiet /etc/hosts sitea/bucket
sleep 1
echo "Copying data to source sitea/olockbucket"
./mc cp --quiet /etc/hosts sitea/olockbucket
sleep 1
echo "Verifying the metadata difference between source and target"
if diff -pruN <(./mc stat --json sitea/bucket/hosts | jq .) <(./mc stat --json siteb/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
echo "verified sitea-> COMPLETED, siteb-> REPLICA"
fi
if diff -pruN <(./mc stat --json sitea/bucket/hosts | jq .) <(./mc stat --json sitec/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
echo "verified sitea-> COMPLETED, sitec-> REPLICA"
fi
echo "Verifying the metadata difference between source and target"
if diff -pruN <(./mc stat --json sitea/olockbucket/hosts | jq .) <(./mc stat --json siteb/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
echo "verified sitea-> COMPLETED, siteb-> REPLICA"
fi
if diff -pruN <(./mc stat --json sitea/olockbucket/hosts | jq .) <(./mc stat --json sitec/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then
echo "verified sitea-> COMPLETED, sitec-> REPLICA"
fi
sleep 5
head -c 221227088 </dev/urandom >200M
./mc.RELEASE.2021-03-12T03-36-59Z cp --config-dir ~/.mc --encrypt "sitea" --quiet 200M "sitea/bucket/200M-enc-v1"
./mc.RELEASE.2021-03-12T03-36-59Z cp --config-dir ~/.mc --quiet 200M "sitea/bucket/200M-v1"
./mc cp --encrypt "sitea" --quiet 200M "sitea/bucket/200M-enc-v2"
./mc cp --quiet 200M "sitea/bucket/200M-v2"
sleep 10
echo "Verifying ETag for all objects"
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket bucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket olockbucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket olockbucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket olockbucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket olockbucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket olockbucket
./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket olockbucket
catch
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"sort"
"testing"
"time"
"github.com/minio/minio/internal/bucket/lifecycle"
xhttp "github.com/minio/minio/internal/http"
)
func Test_hashDeterministicString(t *testing.T) {
tests := []struct {
name string
arg map[string]string
}{
{
name: "zero",
arg: map[string]string{},
},
{
name: "nil",
arg: nil,
},
{
name: "one",
arg: map[string]string{"key": "value"},
},
{
name: "several",
arg: map[string]string{
xhttp.AmzRestore: "FAILED",
xhttp.ContentMD5: mustGetUUID(),
xhttp.AmzBucketReplicationStatus: "PENDING",
xhttp.ContentType: "application/json",
},
},
{
name: "someempty",
arg: map[string]string{
xhttp.AmzRestore: "",
xhttp.ContentMD5: mustGetUUID(),
xhttp.AmzBucketReplicationStatus: "",
xhttp.ContentType: "application/json",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
const n = 100
want := hashDeterministicString(tt.arg)
m := tt.arg
for i := 0; i < n; i++ {
if got := hashDeterministicString(m); got != want {
t.Errorf("hashDeterministicString() = %v, want %v", got, want)
}
}
// Check casual collisions
if m == nil {
m = make(map[string]string)
}
m["12312312"] = ""
if got := hashDeterministicString(m); got == want {
t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
}
want = hashDeterministicString(m)
delete(m, "12312312")
m["another"] = ""
if got := hashDeterministicString(m); got == want {
t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
}
want = hashDeterministicString(m)
m["another"] = "hashDeterministicString"
if got := hashDeterministicString(m); got == want {
t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
}
want = hashDeterministicString(m)
m["another"] = "hashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicString"
if got := hashDeterministicString(m); got == want {
t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
}
// Flip key/value
want = hashDeterministicString(m)
delete(m, "another")
m["hashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicString"] = "another"
if got := hashDeterministicString(m); got == want {
t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
}
})
}
}
func TestGetFileInfoVersions(t *testing.T) {
basefi := FileInfo{
Volume: "volume",
Name: "object-name",
VersionID: "756100c6-b393-4981-928a-d49bbc164741",
IsLatest: true,
Deleted: false,
TransitionStatus: "",
DataDir: "bffea160-ca7f-465f-98bc-9b4f1c3ba1ef",
XLV1: false,
ModTime: time.Now().UTC(),
Size: 0,
Mode: 0,
Metadata: nil,
Parts: nil,
Erasure: ErasureInfo{
Algorithm: ReedSolomon.String(),
DataBlocks: 4,
ParityBlocks: 2,
BlockSize: 10000,
Index: 1,
Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8},
Checksums: []ChecksumInfo{{
PartNumber: 1,
Algorithm: HighwayHash256S,
Hash: nil,
}},
},
MarkDeleted: false,
NumVersions: 1,
SuccessorModTime: time.Time{},
}
xl := xlMetaV2{}
var versions []FileInfo
var freeVersionIDs []string
for i := 0; i < 5; i++ {
fi := basefi
fi.VersionID = mustGetUUID()
fi.DataDir = mustGetUUID()
fi.ModTime = basefi.ModTime.Add(time.Duration(i) * time.Second)
if err := xl.AddVersion(fi); err != nil {
t.Fatalf("%d: Failed to add version %v", i+1, err)
}
if i > 3 {
// Simulate transition of a version
transfi := fi
transfi.TransitionStatus = lifecycle.TransitionComplete
transfi.TransitionTier = "MINIO-TIER"
transfi.TransitionedObjName = mustGetUUID()
xl.DeleteVersion(transfi)
fi.SetTierFreeVersionID(mustGetUUID())
// delete this version leading to a free version
xl.DeleteVersion(fi)
freeVersionIDs = append(freeVersionIDs, fi.TierFreeVersionID())
} else {
versions = append(versions, fi)
}
}
buf, err := xl.AppendTo(nil)
if err != nil {
t.Fatalf("Failed to serialize xlmeta %v", err)
}
fivs, err := getFileInfoVersions(buf, basefi.Volume, basefi.Name)
if err != nil {
t.Fatalf("getFileInfoVersions failed: %v", err)
}
sort.Slice(versions, func(i, j int) bool {
if versions[i].IsLatest {
return true
}
if versions[j].IsLatest {
return false
}
return versions[i].ModTime.After(versions[j].ModTime)
})
for i, fi := range fivs.Versions {
if fi.VersionID != versions[i].VersionID {
t.Fatalf("getFileInfoVersions: versions don't match at %d, version id expected %s but got %s", i, fi.VersionID, versions[i].VersionID)
}
}
for i, free := range fivs.FreeVersions {
if free.VersionID != freeVersionIDs[i] {
t.Fatalf("getFileInfoVersions: free versions don't match at %d, version id expected %s but got %s", i, free.VersionID, freeVersionIDs[i])
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logger
import (
"crypto/tls"
"errors"
"strconv"
"strings"
"github.com/minio/pkg/env"
xnet "github.com/minio/pkg/net"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger/target/http"
"github.com/minio/minio/internal/logger/target/kafka"
)
// Console logger target
type Console struct {
Enabled bool `json:"enabled"`
}
// Audit/Logger constants
const (
Endpoint = "endpoint"
AuthToken = "auth_token"
ClientCert = "client_cert"
ClientKey = "client_key"
QueueSize = "queue_size"
QueueDir = "queue_dir"
Proxy = "proxy"
KafkaBrokers = "brokers"
KafkaTopic = "topic"
KafkaTLS = "tls"
KafkaTLSSkipVerify = "tls_skip_verify"
KafkaTLSClientAuth = "tls_client_auth"
KafkaSASL = "sasl"
KafkaSASLUsername = "sasl_username"
KafkaSASLPassword = "<PASSWORD>"
KafkaSASLMechanism = "sasl_mechanism"
KafkaClientTLSCert = "client_tls_cert"
KafkaClientTLSKey = "client_tls_key"
KafkaVersion = "version"
KafkaQueueDir = "queue_dir"
KafkaQueueSize = "queue_size"
EnvLoggerWebhookEnable = "MINIO_LOGGER_WEBHOOK_ENABLE"
EnvLoggerWebhookEndpoint = "MINIO_LOGGER_WEBHOOK_ENDPOINT"
EnvLoggerWebhookAuthToken = "<PASSWORD>AUTH_<PASSWORD>"
EnvLoggerWebhookClientCert = "MINIO_LOGGER_WEBHOOK_CLIENT_CERT"
EnvLoggerWebhookClientKey = "MINIO_LOGGER_WEBHOOK_CLIENT_KEY"
EnvLoggerWebhookProxy = "MINIO_LOGGER_WEBHOOK_PROXY"
EnvLoggerWebhookQueueSize = "MINIO_LOGGER_WEBHOOK_QUEUE_SIZE"
EnvLoggerWebhookQueueDir = "MINIO_LOGGER_WEBHOOK_QUEUE_DIR"
EnvAuditWebhookEnable = "MINIO_AUDIT_WEBHOOK_ENABLE"
EnvAuditWebhookEndpoint = "MINIO_AUDIT_WEBHOOK_ENDPOINT"
EnvAuditWebhookAuthToken = "<PASSWORD>AUTH_TOKEN"
EnvAuditWebhookClientCert = "MINIO_AUDIT_WEBHOOK_CLIENT_CERT"
EnvAuditWebhookClientKey = "MINIO_AUDIT_WEBHOOK_CLIENT_KEY"
EnvAuditWebhookQueueSize = "MINIO_AUDIT_WEBHOOK_QUEUE_SIZE"
EnvAuditWebhookQueueDir = "MINIO_AUDIT_WEBHOOK_QUEUE_DIR"
EnvKafkaEnable = "MINIO_AUDIT_KAFKA_ENABLE"
EnvKafkaBrokers = "MINIO_AUDIT_KAFKA_BROKERS"
EnvKafkaTopic = "MINIO_AUDIT_KAFKA_TOPIC"
EnvKafkaTLS = "MINIO_AUDIT_KAFKA_TLS"
EnvKafkaTLSSkipVerify = "MINIO_AUDIT_KAFKA_TLS_SKIP_VERIFY"
EnvKafkaTLSClientAuth = "MINIO_AUDIT_KAFKA_TLS_CLIENT_AUTH"
EnvKafkaSASLEnable = "MINIO_AUDIT_KAFKA_SASL"
EnvKafkaSASLUsername = "MINIO_AUDIT_KAFKA_SASL_USERNAME"
EnvKafkaSASLPassword = "<PASSWORD>AUDIT_KAFKA_SASL_PASSWORD"
EnvKafkaSASLMechanism = "MINIO_AUDIT_KAFKA_SASL_MECHANISM"
EnvKafkaClientTLSCert = "MINIO_AUDIT_KAFKA_CLIENT_TLS_CERT"
EnvKafkaClientTLSKey = "MINIO_AUDIT_KAFKA_CLIENT_TLS_KEY"
EnvKafkaVersion = "MINIO_AUDIT_KAFKA_VERSION"
EnvKafkaQueueDir = "MINIO_AUDIT_KAFKA_QUEUE_DIR"
EnvKafkaQueueSize = "MINIO_AUDIT_KAFKA_QUEUE_SIZE"
loggerTargetNamePrefix = "logger-"
auditTargetNamePrefix = "audit-"
)
// Default KVS for loggerHTTP and loggerAuditHTTP
var (
DefaultLoggerWebhookKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOff,
},
config.KV{
Key: Endpoint,
Value: "",
},
config.KV{
Key: AuthToken,
Value: "",
},
config.KV{
Key: ClientCert,
Value: "",
},
config.KV{
Key: ClientKey,
Value: "",
},
config.KV{
Key: Proxy,
Value: "",
},
config.KV{
Key: QueueSize,
Value: "100000",
},
config.KV{
Key: QueueDir,
Value: "",
},
}
DefaultAuditWebhookKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOff,
},
config.KV{
Key: Endpoint,
Value: "",
},
config.KV{
Key: AuthToken,
Value: "",
},
config.KV{
Key: ClientCert,
Value: "",
},
config.KV{
Key: ClientKey,
Value: "",
},
config.KV{
Key: QueueSize,
Value: "100000",
},
config.KV{
Key: QueueDir,
Value: "",
},
}
DefaultAuditKafkaKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOff,
},
config.KV{
Key: KafkaTopic,
Value: "",
},
config.KV{
Key: KafkaBrokers,
Value: "",
},
config.KV{
Key: KafkaSASLUsername,
Value: "",
},
config.KV{
Key: KafkaSASLPassword,
Value: "",
},
config.KV{
Key: KafkaSASLMechanism,
Value: "plain",
},
config.KV{
Key: KafkaClientTLSCert,
Value: "",
},
config.KV{
Key: KafkaClientTLSKey,
Value: "",
},
config.KV{
Key: KafkaTLSClientAuth,
Value: "0",
},
config.KV{
Key: KafkaSASL,
Value: config.EnableOff,
},
config.KV{
Key: KafkaTLS,
Value: config.EnableOff,
},
config.KV{
Key: KafkaTLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: KafkaVersion,
Value: "",
},
config.KV{
Key: QueueSize,
Value: "100000",
},
config.KV{
Key: QueueDir,
Value: "",
},
}
)
// Config console and http logger targets
type Config struct {
Console Console `json:"console"`
HTTP map[string]http.Config `json:"http"`
AuditWebhook map[string]http.Config `json:"audit"`
AuditKafka map[string]kafka.Config `json:"audit_kafka"`
}
// NewConfig - initialize new logger config.
func NewConfig() Config {
cfg := Config{
// Console logging is on by default
Console: Console{
Enabled: true,
},
HTTP: make(map[string]http.Config),
AuditWebhook: make(map[string]http.Config),
AuditKafka: make(map[string]kafka.Config),
}
return cfg
}
func getCfgVal(envName, key, defaultValue string) string {
if key != config.Default {
envName = envName + config.Default + key
}
return env.Get(envName, defaultValue)
}
func lookupLegacyConfigForSubSys(subSys string) Config {
cfg := NewConfig()
switch subSys {
case config.LoggerWebhookSubSys:
var loggerTargets []string
envs := env.List(legacyEnvLoggerHTTPEndpoint)
for _, k := range envs {
target := strings.TrimPrefix(k, legacyEnvLoggerHTTPEndpoint+config.Default)
if target == legacyEnvLoggerHTTPEndpoint {
target = config.Default
}
loggerTargets = append(loggerTargets, target)
}
// Load HTTP logger from the environment if found
for _, target := range loggerTargets {
endpoint := getCfgVal(legacyEnvLoggerHTTPEndpoint, target, "")
if endpoint == "" {
continue
}
cfg.HTTP[target] = http.Config{
Enabled: true,
Endpoint: endpoint,
}
}
case config.AuditWebhookSubSys:
// List legacy audit ENVs if any.
var loggerAuditTargets []string
envs := env.List(legacyEnvAuditLoggerHTTPEndpoint)
for _, k := range envs {
target := strings.TrimPrefix(k, legacyEnvAuditLoggerHTTPEndpoint+config.Default)
if target == legacyEnvAuditLoggerHTTPEndpoint {
target = config.Default
}
loggerAuditTargets = append(loggerAuditTargets, target)
}
for _, target := range loggerAuditTargets {
endpoint := getCfgVal(legacyEnvAuditLoggerHTTPEndpoint, target, "")
if endpoint == "" {
continue
}
cfg.AuditWebhook[target] = http.Config{
Enabled: true,
Endpoint: endpoint,
}
}
}
return cfg
}
func lookupAuditKafkaConfig(scfg config.Config, cfg Config) (Config, error) {
for k, kv := range config.Merge(scfg[config.AuditKafkaSubSys], EnvKafkaEnable, DefaultAuditKafkaKVS) {
enabledCfgVal := getCfgVal(EnvKafkaEnable, k, kv.Get(config.Enable))
enabled, err := config.ParseBool(enabledCfgVal)
if err != nil {
return cfg, err
}
if !enabled {
continue
}
var brokers []xnet.Host
kafkaBrokers := getCfgVal(EnvKafkaBrokers, k, kv.Get(KafkaBrokers))
if len(kafkaBrokers) == 0 {
return cfg, config.Errorf("kafka 'brokers' cannot be empty")
}
for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) {
var host *xnet.Host
host, err = xnet.ParseHost(s)
if err != nil {
break
}
brokers = append(brokers, *host)
}
if err != nil {
return cfg, err
}
clientAuthCfgVal := getCfgVal(EnvKafkaTLSClientAuth, k, kv.Get(KafkaTLSClientAuth))
clientAuth, err := strconv.Atoi(clientAuthCfgVal)
if err != nil {
return cfg, err
}
kafkaArgs := kafka.Config{
Enabled: enabled,
Brokers: brokers,
Topic: getCfgVal(EnvKafkaTopic, k, kv.Get(KafkaTopic)),
Version: getCfgVal(EnvKafkaVersion, k, kv.Get(KafkaVersion)),
}
kafkaArgs.TLS.Enable = getCfgVal(EnvKafkaTLS, k, kv.Get(KafkaTLS)) == config.EnableOn
kafkaArgs.TLS.SkipVerify = getCfgVal(EnvKafkaTLSSkipVerify, k, kv.Get(KafkaTLSSkipVerify)) == config.EnableOn
kafkaArgs.TLS.ClientAuth = tls.ClientAuthType(clientAuth)
kafkaArgs.TLS.ClientTLSCert = getCfgVal(EnvKafkaClientTLSCert, k, kv.Get(KafkaClientTLSCert))
kafkaArgs.TLS.ClientTLSKey = getCfgVal(EnvKafkaClientTLSKey, k, kv.Get(KafkaClientTLSKey))
kafkaArgs.SASL.Enable = getCfgVal(EnvKafkaSASLEnable, k, kv.Get(KafkaSASL)) == config.EnableOn
kafkaArgs.SASL.User = getCfgVal(EnvKafkaSASLUsername, k, kv.Get(KafkaSASLUsername))
kafkaArgs.SASL.Password = getCfgVal(EnvKafkaSASLPassword, k, kv.Get(KafkaSASLPassword))
kafkaArgs.SASL.Mechanism = getCfgVal(EnvKafkaSASLMechanism, k, kv.Get(KafkaSASLMechanism))
kafkaArgs.QueueDir = getCfgVal(EnvKafkaQueueDir, k, kv.Get(KafkaQueueDir))
queueSizeCfgVal := getCfgVal(EnvKafkaQueueSize, k, kv.Get(KafkaQueueSize))
queueSize, err := strconv.Atoi(queueSizeCfgVal)
if err != nil {
return cfg, err
}
if queueSize <= 0 {
return cfg, errors.New("invalid queue_size value")
}
kafkaArgs.QueueSize = queueSize
cfg.AuditKafka[k] = kafkaArgs
}
return cfg, nil
}
func lookupLoggerWebhookConfig(scfg config.Config, cfg Config) (Config, error) {
envs := env.List(EnvLoggerWebhookEndpoint)
var loggerTargets []string
for _, k := range envs {
target := strings.TrimPrefix(k, EnvLoggerWebhookEndpoint+config.Default)
if target == EnvLoggerWebhookEndpoint {
target = config.Default
}
loggerTargets = append(loggerTargets, target)
}
// Load HTTP logger from the environment if found
for _, target := range loggerTargets {
if v, ok := cfg.HTTP[target]; ok && v.Enabled {
// This target is already enabled using the
// legacy environment variables, ignore.
continue
}
enableCfgVal := getCfgVal(EnvLoggerWebhookEnable, target, "")
enable, err := config.ParseBool(enableCfgVal)
if err != nil || !enable {
continue
}
clientCert := getCfgVal(EnvLoggerWebhookClientCert, target, "")
clientKey := getCfgVal(EnvLoggerWebhookClientKey, target, "")
err = config.EnsureCertAndKey(clientCert, clientKey)
if err != nil {
return cfg, err
}
queueSizeCfgVal := getCfgVal(EnvLoggerWebhookQueueSize, target, "100000")
queueSize, err := strconv.Atoi(queueSizeCfgVal)
if err != nil {
return cfg, err
}
if queueSize <= 0 {
return cfg, errors.New("invalid queue_size value")
}
cfg.HTTP[target] = http.Config{
Enabled: true,
Endpoint: getCfgVal(EnvLoggerWebhookEndpoint, target, ""),
AuthToken: getCfgVal(EnvLoggerWebhookAuthToken, target, ""),
ClientCert: clientCert,
ClientKey: clientKey,
Proxy: getCfgVal(EnvLoggerWebhookProxy, target, ""),
QueueSize: queueSize,
QueueDir: getCfgVal(EnvLoggerWebhookQueueDir, target, ""),
Name: loggerTargetNamePrefix + target,
}
}
for starget, kv := range scfg[config.LoggerWebhookSubSys] {
if l, ok := cfg.HTTP[starget]; ok && l.Enabled {
// Ignore this HTTP logger config since there is
// a target with the same name loaded and enabled
// from the environment.
continue
}
subSysTarget := config.LoggerWebhookSubSys
if starget != config.Default {
subSysTarget = config.LoggerWebhookSubSys + config.SubSystemSeparator + starget
}
if err := config.CheckValidKeys(subSysTarget, kv, DefaultLoggerWebhookKVS); err != nil {
return cfg, err
}
enabled, err := config.ParseBool(kv.Get(config.Enable))
if err != nil {
return cfg, err
}
if !enabled {
continue
}
err = config.EnsureCertAndKey(kv.Get(ClientCert), kv.Get(ClientKey))
if err != nil {
return cfg, err
}
queueSize, err := strconv.Atoi(kv.Get(QueueSize))
if err != nil {
return cfg, err
}
if queueSize <= 0 {
return cfg, errors.New("invalid queue_size value")
}
cfg.HTTP[starget] = http.Config{
Enabled: true,
Endpoint: kv.Get(Endpoint),
AuthToken: kv.Get(AuthToken),
ClientCert: kv.Get(ClientCert),
ClientKey: kv.Get(ClientKey),
Proxy: kv.Get(Proxy),
QueueSize: queueSize,
QueueDir: kv.Get(QueueDir),
Name: loggerTargetNamePrefix + starget,
}
}
return cfg, nil
}
func lookupAuditWebhookConfig(scfg config.Config, cfg Config) (Config, error) {
var loggerAuditTargets []string
envs := env.List(EnvAuditWebhookEndpoint)
for _, k := range envs {
target := strings.TrimPrefix(k, EnvAuditWebhookEndpoint+config.Default)
if target == EnvAuditWebhookEndpoint {
target = config.Default
}
loggerAuditTargets = append(loggerAuditTargets, target)
}
for _, target := range loggerAuditTargets {
if v, ok := cfg.AuditWebhook[target]; ok && v.Enabled {
// This target is already enabled using the
// legacy environment variables, ignore.
continue
}
enable, err := config.ParseBool(getCfgVal(EnvAuditWebhookEnable, target, ""))
if err != nil || !enable {
continue
}
clientCert := getCfgVal(EnvAuditWebhookClientCert, target, "")
clientKey := getCfgVal(EnvAuditWebhookClientKey, target, "")
err = config.EnsureCertAndKey(clientCert, clientKey)
if err != nil {
return cfg, err
}
queueSizeCfgVal := getCfgVal(EnvAuditWebhookQueueSize, target, "100000")
queueSize, err := strconv.Atoi(queueSizeCfgVal)
if err != nil {
return cfg, err
}
if queueSize <= 0 {
return cfg, errors.New("invalid queue_size value")
}
cfg.AuditWebhook[target] = http.Config{
Enabled: true,
Endpoint: getCfgVal(EnvAuditWebhookEndpoint, target, ""),
AuthToken: getCfgVal(EnvAuditWebhookAuthToken, target, ""),
ClientCert: clientCert,
ClientKey: clientKey,
QueueSize: queueSize,
QueueDir: getCfgVal(EnvAuditWebhookQueueDir, target, ""),
Name: auditTargetNamePrefix + target,
}
}
for starget, kv := range scfg[config.AuditWebhookSubSys] {
if l, ok := cfg.AuditWebhook[starget]; ok && l.Enabled {
// Ignore this audit config since another target
// with the same name is already loaded and enabled
// in the shell environment.
continue
}
subSysTarget := config.AuditWebhookSubSys
if starget != config.Default {
subSysTarget = config.AuditWebhookSubSys + config.SubSystemSeparator + starget
}
if err := config.CheckValidKeys(subSysTarget, kv, DefaultAuditWebhookKVS); err != nil {
return cfg, err
}
enabled, err := config.ParseBool(kv.Get(config.Enable))
if err != nil {
return cfg, err
}
if !enabled {
continue
}
err = config.EnsureCertAndKey(kv.Get(ClientCert), kv.Get(ClientKey))
if err != nil {
return cfg, err
}
queueSize, err := strconv.Atoi(kv.Get(QueueSize))
if err != nil {
return cfg, err
}
if queueSize <= 0 {
return cfg, errors.New("invalid queue_size value")
}
cfg.AuditWebhook[starget] = http.Config{
Enabled: true,
Endpoint: kv.Get(Endpoint),
AuthToken: kv.Get(AuthToken),
ClientCert: kv.Get(ClientCert),
ClientKey: kv.Get(ClientKey),
QueueSize: queueSize,
QueueDir: kv.Get(QueueDir),
Name: auditTargetNamePrefix + starget,
}
}
return cfg, nil
}
// LookupConfigForSubSys - lookup logger config, override with ENVs if set, for the given sub-system
func LookupConfigForSubSys(scfg config.Config, subSys string) (cfg Config, err error) {
switch subSys {
case config.LoggerWebhookSubSys:
cfg = lookupLegacyConfigForSubSys(config.LoggerWebhookSubSys)
if cfg, err = lookupLoggerWebhookConfig(scfg, cfg); err != nil {
return cfg, err
}
case config.AuditWebhookSubSys:
cfg = lookupLegacyConfigForSubSys(config.AuditWebhookSubSys)
if cfg, err = lookupAuditWebhookConfig(scfg, cfg); err != nil {
return cfg, err
}
case config.AuditKafkaSubSys:
cfg.AuditKafka = make(map[string]kafka.Config)
if cfg, err = lookupAuditKafkaConfig(scfg, cfg); err != nil {
return cfg, err
}
}
return cfg, nil
}
// ValidateSubSysConfig - validates logger related config of given sub-system
func ValidateSubSysConfig(scfg config.Config, subSys string) error {
	// Validate by performing a full lookup, which also covers legacy environment variables
_, err := LookupConfigForSubSys(scfg, subSys)
return err
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"strings"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/mux"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
)
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationAdd")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var sites []madmin.PeerSite
if err := parseJSONBody(ctx, r.Body, &sites, cred.SecretKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
//
// used internally to tell current cluster to enable SR with
// the provided peer clusters and service account.
func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerJoin")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var joinArg madmin.SRPeerJoinReq
if err := parseJSONBody(ctx, r.Body, &joinArg, cred.SecretKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerBucketOps - PUT /minio/admin/v3/site-replication/bucket-ops?bucket=x&operation=y
func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerBucketOps")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
operation := madmin.BktOp(vars["operation"])
var err error
switch operation {
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.MakeWithVersioningBktOp:
createdAt, cerr := time.Parse(time.RFC3339Nano, strings.TrimSpace(r.Form.Get("createdAt")))
if cerr != nil {
createdAt = timeSentinel
}
opts := MakeBucketOptions{
LockEnabled: r.Form.Get("lockEnabled") == "true",
VersioningEnabled: r.Form.Get("versioningEnabled") == "true",
ForceCreate: r.Form.Get("forceCreate") == "true",
CreatedAt: createdAt,
}
err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
case madmin.ConfigureReplBktOp:
err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
case madmin.DeleteBucketBktOp, madmin.ForceDeleteBucketBktOp:
err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
Force: operation == madmin.ForceDeleteBucketBktOp,
SRDeleteOp: getSRBucketDeleteOp(true),
})
case madmin.PurgeDeletedBucketOp:
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerReplicateIAMItem")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var item madmin.SRIAMItem
if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var err error
switch item.Type {
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.SRIAMItemPolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
} else {
policy, perr := iampolicy.ParseConfig(bytes.NewReader(item.Policy))
if perr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, perr), r.URL)
return
}
if policy.IsEmpty() {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, nil, item.UpdatedAt)
} else {
err = globalSiteReplicationSys.PeerAddPolicyHandler(ctx, item.Name, policy, item.UpdatedAt)
}
}
case madmin.SRIAMItemSvcAcc:
err = globalSiteReplicationSys.PeerSvcAccChangeHandler(ctx, item.SvcAccChange, item.UpdatedAt)
case madmin.SRIAMItemPolicyMapping:
err = globalSiteReplicationSys.PeerPolicyMappingHandler(ctx, item.PolicyMapping, item.UpdatedAt)
case madmin.SRIAMItemSTSAcc:
err = globalSiteReplicationSys.PeerSTSAccHandler(ctx, item.STSCredential, item.UpdatedAt)
case madmin.SRIAMItemIAMUser:
err = globalSiteReplicationSys.PeerIAMUserChangeHandler(ctx, item.IAMUser, item.UpdatedAt)
case madmin.SRIAMItemGroupInfo:
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SRPeerReplicateBucketItem - PUT /minio/admin/v3/site-replication/bucket-meta
func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerReplicateBucketItem")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationOperationAction)
if objectAPI == nil {
return
}
var item madmin.SRBucketMeta
if err := parseJSONBody(ctx, r.Body, &item, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var err error
switch item.Type {
default:
err = errSRInvalidRequest(errInvalidArgument)
case madmin.SRBucketMetaTypePolicy:
if item.Policy == nil {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
bktPolicy, berr := policy.ParseConfig(bytes.NewReader(item.Policy), item.Bucket)
if berr != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, berr), r.URL)
return
}
if bktPolicy.IsEmpty() {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
err = globalSiteReplicationSys.PeerBucketPolicyHandler(ctx, item.Bucket, bktPolicy, item.UpdatedAt)
}
}
case madmin.SRBucketMetaTypeQuotaConfig:
if item.Quota == nil {
err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, nil, item.UpdatedAt)
} else {
quotaConfig, err := parseBucketQuota(item.Bucket, item.Quota)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = globalSiteReplicationSys.PeerBucketQuotaConfigHandler(ctx, item.Bucket, quotaConfig, item.UpdatedAt); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
case madmin.SRBucketMetaTypeVersionConfig:
err = globalSiteReplicationSys.PeerBucketVersioningHandler(ctx, item.Bucket, item.Versioning, item.UpdatedAt)
case madmin.SRBucketMetaTypeTags:
err = globalSiteReplicationSys.PeerBucketTaggingHandler(ctx, item.Bucket, item.Tags, item.UpdatedAt)
case madmin.SRBucketMetaTypeObjectLockConfig:
err = globalSiteReplicationSys.PeerBucketObjectLockConfigHandler(ctx, item.Bucket, item.ObjectLockConfig, item.UpdatedAt)
case madmin.SRBucketMetaTypeSSEConfig:
err = globalSiteReplicationSys.PeerBucketSSEConfigHandler(ctx, item.Bucket, item.SSEConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationInfo - GET /minio/admin/v3/site-replication/info
func (a adminAPIHandlers) SiteReplicationInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
info, err := globalSiteReplicationSys.GetClusterInfo(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationGetIDPSettings")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
idpSettings := globalSiteReplicationSys.GetIDPSettings(ctx)
if err := json.NewEncoder(w).Encode(idpSettings); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
}
}
if encryptionKey != "" {
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
if err != nil {
logger.LogIf(ctx, err)
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
}
}
}
return json.Unmarshal(data, v)
}
// SiteReplicationStatus - GET /minio/admin/v3/site-replication/status
func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
opts := getSRStatusOptions(r)
// default options to all if status options are unset for backward compatibility
var dfltOpts madmin.SRStatusOptions
if opts == dfltOpts {
opts.Buckets = true
opts.Users = true
opts.Policies = true
opts.Groups = true
}
info, err := globalSiteReplicationSys.SiteReplicationStatus(ctx, objectAPI, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationMetaInfo - GET /minio/admin/v3/site-replication/metainfo
func (a adminAPIHandlers) SiteReplicationMetaInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationMetaInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationInfoAction)
if objectAPI == nil {
return
}
opts := getSRStatusOptions(r)
info, err := globalSiteReplicationSys.SiteReplicationMetaInfo(ctx, objectAPI, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationEdit - PUT /minio/admin/v3/site-replication/edit
func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationEdit")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var site madmin.PeerInfo
err := parseJSONBody(ctx, r.Body, &site, cred.SecretKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
//
// used internally to tell current cluster to update endpoint for peer
func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerEdit")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationAddAction)
if objectAPI == nil {
return
}
var pi madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &pi, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
q := r.Form
opts.Buckets = q.Get("buckets") == "true"
opts.Policies = q.Get("policies") == "true"
opts.Groups = q.Get("groups") == "true"
opts.Users = q.Get("users") == "true"
opts.Entity = madmin.GetSREntityType(q.Get("entity"))
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
return
}
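// exampleGetSRStatusOptions is an illustrative sketch, not a real call site:
// it mimics a request such as
// GET /minio/admin/v3/site-replication/status?buckets=true&entity=bucket&entityvalue=photos
// and shows that unset flags simply stay false. The query values, including
// the bucket name "photos", are hypothetical; in the real handlers r.Form has
// already been parsed before getSRStatusOptions is called.
func exampleGetSRStatusOptions() madmin.SRStatusOptions {
	r := &http.Request{}
	r.Form = map[string][]string{
		"buckets":     {"true"},
		"entity":      {"bucket"},
		"entityvalue": {"photos"},
	}
	return getSRStatusOptions(r) // Buckets=true, Entity/EntityValue set, all other flags false
}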
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationRemove")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
var rreq madmin.SRRemoveReq
err := parseJSONBody(ctx, r.Body, &rreq, "")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SRPeerRemove - PUT /minio/admin/v3/site-replication/peer/remove
//
// used internally to tell current cluster to remove the peer from site replication
func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SRPeerRemove")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationRemoveAction)
if objectAPI == nil {
return
}
var req madmin.SRRemoveReq
if err := parseJSONBody(ctx, r.Body, &req, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SiteReplicationResyncOp - PUT /minio/admin/v3/site-replication/resync/op
func (a adminAPIHandlers) SiteReplicationResyncOp(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SiteReplicationResyncOp")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SiteReplicationResyncAction)
if objectAPI == nil {
return
}
var peerSite madmin.PeerInfo
if err := parseJSONBody(ctx, r.Body, &peerSite, ""); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
vars := mux.Vars(r)
op := madmin.SiteResyncOp(vars["operation"])
var (
status madmin.SRResyncOpStatus
err error
)
switch op {
case madmin.SiteResyncStart:
status, err = globalSiteReplicationSys.startResync(ctx, objectAPI, peerSite)
case madmin.SiteResyncCancel:
status, err = globalSiteReplicationSys.cancelResync(ctx, objectAPI, peerSite)
default:
err = errSRInvalidRequest(errInvalidArgument)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(status)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"fmt"
"net"
"runtime"
"strconv"
"testing"
)
// Tests for port availability logic written for server startup sequence.
func TestCheckPortAvailability(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip()
}
l, err := net.Listen("tcp", "localhost:0") // ask kernel for a free port.
if err != nil {
t.Fatal(err)
}
defer l.Close()
port := l.Addr().(*net.TCPAddr).Port
testCases := []struct {
host string
port int
expectedErr error
}{
{"", port, fmt.Errorf("listen tcp :%v: bind: address already in use", port)},
{"127.0.0.1", port, fmt.Errorf("listen tcp 127.0.0.1:%v: bind: address already in use", port)},
}
for _, testCase := range testCases {
err := CheckPortAvailability(testCase.host, strconv.Itoa(testCase.port), TCPOptions{})
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
case err == nil:
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
case testCase.expectedErr.Error() != err.Error():
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lifecycle
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"sort"
"strings"
"time"
"github.com/google/uuid"
xhttp "github.com/minio/minio/internal/http"
)
var (
errLifecycleTooManyRules = Errorf("Lifecycle configuration allows a maximum of 1000 rules")
errLifecycleNoRule = Errorf("Lifecycle configuration should have at least one rule")
errLifecycleDuplicateID = Errorf("Rule ID must be unique. Found same ID for more than one rule")
errXMLNotWellFormed = Errorf("The XML you provided was not well-formed or did not validate against our published schema")
)
const (
// TransitionComplete marks completed transition
TransitionComplete = "complete"
// TransitionPending - transition is yet to be attempted
TransitionPending = "pending"
)
// Action represents a delete action or other transition
// actions that will be implemented later.
type Action int
//go:generate stringer -type Action $GOFILE
const (
// NoneAction means no action required after evaluating lifecycle rules
NoneAction Action = iota
// DeleteAction means the object needs to be removed after evaluating lifecycle rules
DeleteAction
// DeleteVersionAction deletes a particular version
DeleteVersionAction
// TransitionAction transitions a particular object after evaluating lifecycle transition rules
TransitionAction
// TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules
TransitionVersionAction
// DeleteRestoredAction means the temporarily restored object needs to be removed after evaluating lifecycle rules
DeleteRestoredAction
// DeleteRestoredVersionAction deletes a particular version that was temporarily restored
DeleteRestoredVersionAction
// ActionCount must be the last action and shouldn't be used as a regular action.
ActionCount
)
// DeleteRestored - Returns true if action demands delete on restored objects
func (a Action) DeleteRestored() bool {
return a == DeleteRestoredAction || a == DeleteRestoredVersionAction
}
// DeleteVersioned - Returns true if action demands delete on a versioned object
func (a Action) DeleteVersioned() bool {
return a == DeleteVersionAction || a == DeleteRestoredVersionAction
}
// Delete - Returns true if action demands delete on all objects (including restored)
func (a Action) Delete() bool {
if a.DeleteRestored() {
return true
}
return a == DeleteVersionAction || a == DeleteAction
}
// Lifecycle - Configuration for bucket lifecycle.
type Lifecycle struct {
XMLName xml.Name `xml:"LifecycleConfiguration"`
Rules []Rule `xml:"Rule"`
}
// HasTransition returns 'true' if lifecycle document has Transition enabled.
func (lc Lifecycle) HasTransition() bool {
for _, rule := range lc.Rules {
if rule.Transition.IsEnabled() {
return true
}
}
return false
}
// UnmarshalXML - decodes XML data.
func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
switch start.Name.Local {
case "LifecycleConfiguration", "BucketLifecycleConfiguration":
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <LifecycleConfiguration>/<BucketLifecycleConfiguration> but have <%s>",
start.Name.Local))
}
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
if err != nil {
if err == io.EOF {
break
}
return err
}
if se, ok := t.(xml.StartElement); ok {
switch se.Name.Local {
case "Rule":
var r Rule
if err = d.DecodeElement(&r, &se); err != nil {
return err
}
lc.Rules = append(lc.Rules, r)
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <Rule> but have <%s>", se.Name.Local))
}
}
}
return nil
}
// HasActiveRules - returns whether lc has active rules at any level below or at prefix.
func (lc Lifecycle) HasActiveRules(prefix string) bool {
if len(lc.Rules) == 0 {
return false
}
for _, rule := range lc.Rules {
if rule.Status == Disabled {
continue
}
if len(prefix) > 0 && len(rule.GetPrefix()) > 0 {
// If recursive, we can skip this rule if it doesn't match the tested prefix.
if !strings.HasPrefix(prefix, rule.GetPrefix()) && !strings.HasPrefix(rule.GetPrefix(), prefix) {
continue
}
}
if rule.NoncurrentVersionExpiration.NoncurrentDays > 0 {
return true
}
if rule.NoncurrentVersionExpiration.NewerNoncurrentVersions > 0 {
return true
}
if !rule.NoncurrentVersionTransition.IsNull() {
return true
}
if rule.Expiration.IsNull() && rule.Transition.IsNull() {
continue
}
if !rule.Expiration.IsDateNull() && rule.Expiration.Date.Before(time.Now().UTC()) {
return true
}
if !rule.Expiration.IsDaysNull() {
return true
}
if !rule.Transition.IsDateNull() && rule.Transition.Date.Before(time.Now().UTC()) {
return true
}
if !rule.Transition.IsNull() { // this allows for Transition.Days to be zero.
return true
}
}
return false
}
// ParseLifecycleConfigWithID - parses for a Lifecycle config and assigns
// unique id to rules with empty ID.
func ParseLifecycleConfigWithID(r io.Reader) (*Lifecycle, error) {
var lc Lifecycle
if err := xml.NewDecoder(r).Decode(&lc); err != nil {
return nil, err
}
// assign a unique id for rules with empty ID
for i := range lc.Rules {
if lc.Rules[i].ID == "" {
lc.Rules[i].ID = uuid.New().String()
}
}
return &lc, nil
}
// ParseLifecycleConfig - parses data in given reader to Lifecycle.
func ParseLifecycleConfig(reader io.Reader) (*Lifecycle, error) {
var lc Lifecycle
if err := xml.NewDecoder(reader).Decode(&lc); err != nil {
return nil, err
}
return &lc, nil
}
// Validate - validates the lifecycle configuration
func (lc Lifecycle) Validate() error {
// Lifecycle config can't have more than 1000 rules
if len(lc.Rules) > 1000 {
return errLifecycleTooManyRules
}
// Lifecycle config should have at least one rule
if len(lc.Rules) == 0 {
return errLifecycleNoRule
}
// Validate all the rules in the lifecycle config
for _, r := range lc.Rules {
if err := r.Validate(); err != nil {
return err
}
}
// Make sure Rule ID is unique
for i := range lc.Rules {
if i == len(lc.Rules)-1 {
break
}
otherRules := lc.Rules[i+1:]
for _, otherRule := range otherRules {
if lc.Rules[i].ID == otherRule.ID {
return errLifecycleDuplicateID
}
}
}
return nil
}
// FilterRules returns the rules filtered by the status, prefix and tags
func (lc Lifecycle) FilterRules(obj ObjectOpts) []Rule {
if obj.Name == "" {
return nil
}
var rules []Rule
for _, rule := range lc.Rules {
if rule.Status == Disabled {
continue
}
if !strings.HasPrefix(obj.Name, rule.GetPrefix()) {
continue
}
if !rule.Filter.TestTags(obj.UserTags) {
continue
}
rules = append(rules, rule)
}
return rules
}
// ObjectOpts provides information to deduce the lifecycle actions
// which can be triggered on the resultant object.
type ObjectOpts struct {
Name string
UserTags string
ModTime time.Time
VersionID string
IsLatest bool
DeleteMarker bool
NumVersions int
SuccessorModTime time.Time
TransitionStatus string
RestoreOngoing bool
RestoreExpires time.Time
}
// ExpiredObjectDeleteMarker returns true if an object version referred to by o
// is the only version remaining and is a delete marker. It returns false
// otherwise.
func (o ObjectOpts) ExpiredObjectDeleteMarker() bool {
return o.DeleteMarker && o.NumVersions == 1
}
// Event contains a lifecycle action with associated info
type Event struct {
Action Action
RuleID string
Due time.Time
NoncurrentDays int
NewerNoncurrentVersions int
StorageClass string
}
// Eval returns the lifecycle event applicable now.
func (lc Lifecycle) Eval(obj ObjectOpts) Event {
return lc.eval(obj, time.Now().UTC())
}
// eval returns the lifecycle event applicable at the given now. If now is the
// zero value of time.Time, it returns the upcoming lifecycle event.
func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event {
var events []Event
if obj.ModTime.IsZero() {
return Event{}
}
// Handle expiry of restored object; NB Restored Objects have expiry set on
// them as part of RestoreObject API. They aren't governed by lifecycle
// rules.
if !obj.RestoreExpires.IsZero() && now.After(obj.RestoreExpires) {
action := DeleteRestoredAction
if !obj.IsLatest {
action = DeleteRestoredVersionAction
}
events = append(events, Event{
Action: action,
Due: now,
})
}
for _, rule := range lc.FilterRules(obj) {
if obj.ExpiredObjectDeleteMarker() {
if rule.Expiration.DeleteMarker.val {
// Indicates whether MinIO will remove a delete marker with no noncurrent versions.
// Only latest marker is removed. If set to true, the delete marker will be expired;
// if set to false the policy takes no action. This cannot be specified with Days or
// Date in a Lifecycle Expiration Policy.
events = append(events, Event{
Action: DeleteVersionAction,
RuleID: rule.ID,
Due: now,
})
// No other conflicting actions apply to an expired object delete marker
break
}
if !rule.Expiration.IsDaysNull() {
// Specifying the Days tag will automatically perform ExpiredObjectDeleteMarker cleanup
// once delete markers are old enough to satisfy the age criteria.
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-configuration-examples.html
if expectedExpiry := ExpectedExpiryTime(obj.ModTime, int(rule.Expiration.Days)); now.IsZero() || now.After(expectedExpiry) {
events = append(events, Event{
Action: DeleteVersionAction,
RuleID: rule.ID,
Due: expectedExpiry,
})
// No other conflicting actions apply to an expired object delete marker
break
}
}
}
// Skip rules with newer noncurrent versions specified. These rules are
// not handled at an individual version level. eval applies only to a
// specific version.
if !obj.IsLatest && rule.NoncurrentVersionExpiration.NewerNoncurrentVersions > 0 {
continue
}
if !obj.IsLatest && !rule.NoncurrentVersionExpiration.IsDaysNull() {
// Non current versions should be deleted if their age exceeds non current days configuration
// https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
if expectedExpiry := ExpectedExpiryTime(obj.SuccessorModTime, int(rule.NoncurrentVersionExpiration.NoncurrentDays)); now.IsZero() || now.After(expectedExpiry) {
events = append(events, Event{
Action: DeleteVersionAction,
RuleID: rule.ID,
Due: expectedExpiry,
})
}
}
if !obj.IsLatest && !rule.NoncurrentVersionTransition.IsNull() {
if !obj.DeleteMarker && obj.TransitionStatus != TransitionComplete {
// Non current versions should be transitioned if their age exceeds non current days configuration
// https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
if due, ok := rule.NoncurrentVersionTransition.NextDue(obj); ok && (now.IsZero() || now.After(due)) {
events = append(events, Event{
Action: TransitionVersionAction,
RuleID: rule.ID,
Due: due,
StorageClass: rule.NoncurrentVersionTransition.StorageClass,
})
}
}
}
// Remove the object or simply add a delete marker (once) in a versioned bucket
if obj.IsLatest && !obj.DeleteMarker {
switch {
case !rule.Expiration.IsDateNull():
if now.IsZero() || now.After(rule.Expiration.Date.Time) {
events = append(events, Event{
Action: DeleteAction,
RuleID: rule.ID,
Due: rule.Expiration.Date.Time,
})
}
case !rule.Expiration.IsDaysNull():
if expectedExpiry := ExpectedExpiryTime(obj.ModTime, int(rule.Expiration.Days)); now.IsZero() || now.After(expectedExpiry) {
events = append(events, Event{
Action: DeleteAction,
RuleID: rule.ID,
Due: expectedExpiry,
})
}
}
if obj.TransitionStatus != TransitionComplete {
if due, ok := rule.Transition.NextDue(obj); ok && (now.IsZero() || now.After(due)) {
events = append(events, Event{
Action: TransitionAction,
RuleID: rule.ID,
Due: due,
StorageClass: rule.Transition.StorageClass,
})
}
}
}
}
if len(events) > 0 {
sort.Slice(events, func(i, j int) bool {
if events[i].Due.Equal(events[j].Due) {
// Prefer Expiration over Transition for both current
// and noncurrent versions
switch events[i].Action {
case DeleteAction, DeleteVersionAction:
return true
}
switch events[j].Action {
case DeleteAction, DeleteVersionAction:
return false
}
return true
}
// Prefer earlier occurring event
return events[i].Due.Before(events[j].Due)
})
return events[0]
}
return Event{
Action: NoneAction,
}
}
// ExpectedExpiryTime calculates the expiry, transition or restore date/time based on an object's modtime.
// The expected transition or restore time is always a midnight time following the object
// modification time plus the number of transition/restore days.
//
// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should
// transition in 1 day, then the expected transition time is `Sat, 23 May 2020 00:00:00 GMT`
func ExpectedExpiryTime(modTime time.Time, days int) time.Time {
if days == 0 {
return modTime
}
t := modTime.UTC().Add(time.Duration(days+1) * 24 * time.Hour)
return t.Truncate(24 * time.Hour)
}
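// exampleExpectedExpiryTime is an illustrative sketch of the rounding above:
// for a hypothetical object modified at 2020-05-21T13:42:50Z with days=1,
// ExpectedExpiryTime adds (1+1)*24h and truncates to midnight UTC, yielding
// 2020-05-23T00:00:00Z.
func exampleExpectedExpiryTime() time.Time {
	modTime := time.Date(2020, time.May, 21, 13, 42, 50, 0, time.UTC)
	return ExpectedExpiryTime(modTime, 1) // 2020-05-23 00:00:00 +0000 UTC
}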
// SetPredictionHeaders sets time to expiry and transition headers on w for a
// given obj.
func (lc Lifecycle) SetPredictionHeaders(w http.ResponseWriter, obj ObjectOpts) {
event := lc.eval(obj, time.Time{})
switch event.Action {
case DeleteAction, DeleteVersionAction:
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, event.Due.Format(http.TimeFormat), event.RuleID),
}
case TransitionAction, TransitionVersionAction:
w.Header()[xhttp.MinIOTransition] = []string{
fmt.Sprintf(`transition-date="%s", rule-id="%s"`, event.Due.Format(http.TimeFormat), event.RuleID),
}
}
}
// NoncurrentVersionsExpirationLimit returns the number of noncurrent versions
// to be retained from the first applicable rule per S3 behavior.
func (lc Lifecycle) NoncurrentVersionsExpirationLimit(obj ObjectOpts) Event {
for _, rule := range lc.FilterRules(obj) {
if rule.NoncurrentVersionExpiration.NewerNoncurrentVersions == 0 {
continue
}
return Event{
Action: DeleteVersionAction,
RuleID: rule.ID,
NoncurrentDays: int(rule.NoncurrentVersionExpiration.NoncurrentDays),
NewerNoncurrentVersions: rule.NoncurrentVersionExpiration.NewerNoncurrentVersions,
}
}
return Event{}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ldap
import (
"github.com/minio/minio/internal/config"
)
// LegacyConfig contains AD/LDAP server connectivity information from old config
// V33.
type LegacyConfig struct {
Enabled bool `json:"enabled"`
// E.g. "ldap.minio.io:636"
ServerAddr string `json:"serverAddr"`
// User DN search parameters
UserDNSearchBaseDistName string `json:"userDNSearchBaseDN"`
UserDNSearchBaseDistNames []string `json:"-"` // Generated field
UserDNSearchFilter string `json:"userDNSearchFilter"`
// Group search parameters
GroupSearchBaseDistName string `json:"groupSearchBaseDN"`
GroupSearchBaseDistNames []string `json:"-"` // Generated field
GroupSearchFilter string `json:"groupSearchFilter"`
// Lookup bind LDAP service account
LookupBindDN string `json:"lookupBindDN"`
LookupBindPassword string `json:"lookupBindPassword"`
}
// SetIdentityLDAP - one-time migration of the legacy LDAP configuration (config V33) to the new config format.
func SetIdentityLDAP(s config.Config, ldapArgs LegacyConfig) {
if !ldapArgs.Enabled {
// ldap not enabled no need to preserve it in new settings.
return
}
s[config.IdentityLDAPSubSys][config.Default] = config.KVS{
config.KV{
Key: ServerAddr,
Value: ldapArgs.ServerAddr,
},
config.KV{
Key: GroupSearchFilter,
Value: ldapArgs.GroupSearchFilter,
},
config.KV{
Key: GroupSearchBaseDN,
Value: ldapArgs.GroupSearchBaseDistName,
},
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"sync"
"time"
"github.com/minio/madmin-go/v3"
)
const (
mrfOpsQueueSize = 100000
)
// partialOperation is a successful upload/delete of an object
// but not written in all disks (having quorum)
type partialOperation struct {
bucket string
object string
versionID string
allVersions bool
setIndex, poolIndex int
queued time.Time
}
// mrfState encapsulates all the information
// related to the global background MRF.
type mrfState struct {
ctx context.Context
pools *erasureServerPools
mu sync.Mutex
opCh chan partialOperation
}
// Initialize healing MRF subsystem
func (m *mrfState) init(ctx context.Context, objAPI ObjectLayer) {
m.mu.Lock()
defer m.mu.Unlock()
m.ctx = ctx
m.opCh = make(chan partialOperation, mrfOpsQueueSize)
var ok bool
m.pools, ok = objAPI.(*erasureServerPools)
if ok {
go m.healRoutine()
}
}
// Add a partial S3 operation (put/delete) when one or more disks are offline.
func (m *mrfState) addPartialOp(op partialOperation) {
if m == nil {
return
}
select {
case m.opCh <- op:
default:
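		// Queue is full; drop the operation instead of blocking the caller.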
}
}
var healSleeper = newDynamicSleeper(5, time.Second, false)
// healRoutine processes queued partial operations and issues
// healing requests for buckets/objects belonging to the
// corresponding erasure set
func (m *mrfState) healRoutine() {
for {
select {
case <-m.ctx.Done():
return
case u, ok := <-m.opCh:
if !ok {
return
}
now := time.Now()
if now.Sub(u.queued) < time.Second {
				// Allow recently failed network connections to
				// reconnect: MRF waits 1s before retrying, which
				// gives roughly 4 reconnect attempts.
time.Sleep(1 * time.Second)
}
// wait on timer per heal
wait := healSleeper.Timer(context.Background())
if u.object == "" {
healBucket(u.bucket, madmin.HealNormalScan)
} else {
if u.allVersions {
m.pools.serverPools[u.poolIndex].sets[u.setIndex].listAndHeal(u.bucket, u.object, healObjectVersionsDisparity)
} else {
healObject(u.bucket, u.object, u.versionID, madmin.HealNormalScan)
}
}
wait()
}
}
}
// Initialize healing MRF
func initHealMRF(ctx context.Context, obj ObjectLayer) {
globalMRFState.init(ctx, obj)
}
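// exampleQueuePartialOp is an illustrative sketch, not a real call site: after
// an upload that reached write quorum but missed one or more disks, the
// erasure layer queues the object for MRF healing roughly like this. The
// bucket and object names are hypothetical.
func exampleQueuePartialOp(setIndex, poolIndex int) {
	globalMRFState.addPartialOp(partialOperation{
		bucket:    "photos",
		object:    "2021/01/photo.jpg",
		setIndex:  setIndex,
		poolIndex: poolIndex,
		queued:    time.Now(),
	})
}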
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package versioning
import (
"encoding/xml"
"strings"
"testing"
)
func TestParseConfig(t *testing.T) {
testcases := []struct {
input string
err error
excludedPrefixes []string
excludeFolders bool
}{
{
input: `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Enabled</Status>
</VersioningConfiguration>`,
err: nil,
},
{
input: `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Enabled</Status>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_temporary/</Prefix>
</ExcludedPrefixes>
</VersioningConfiguration>`,
err: nil,
excludedPrefixes: []string{"path/to/my/workload/_staging/", "path/to/my/workload/_temporary/"},
},
{
input: `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Suspended</Status>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging</Prefix>
</ExcludedPrefixes>
</VersioningConfiguration>`,
err: errExcludedPrefixNotSupported,
},
{
input: `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Enabled</Status>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/ab/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/cd/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/ef/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/gh/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/ij/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/kl/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/mn/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/op/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/qr/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/st/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/uv/</Prefix>
</ExcludedPrefixes>
</VersioningConfiguration>`,
err: errTooManyExcludedPrefixes,
},
{
input: `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Enabled</Status>
<ExcludeFolders>true</ExcludeFolders>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_staging/</Prefix>
</ExcludedPrefixes>
<ExcludedPrefixes>
<Prefix>path/to/my/workload/_temporary/</Prefix>
</ExcludedPrefixes>
</VersioningConfiguration>`,
err: nil,
excludedPrefixes: []string{"path/to/my/workload/_staging/", "path/to/my/workload/_temporary/"},
excludeFolders: true,
},
}
for i, tc := range testcases {
var v *Versioning
var err error
v, err = ParseConfig(strings.NewReader(tc.input))
if tc.err != err {
t.Fatalf("Test %d: expected %v but got %v", i+1, tc.err, err)
}
if err != nil {
if tc.err == nil {
t.Fatalf("Test %d: failed due to %v", i+1, err)
}
} else {
if err := v.Validate(); tc.err != err {
t.Fatalf("Test %d: validation failed due to %v", i+1, err)
}
if len(tc.excludedPrefixes) > 0 {
var mismatch bool
if len(v.ExcludedPrefixes) != len(tc.excludedPrefixes) {
t.Fatalf("Test %d: Expected length of excluded prefixes %d but got %d", i+1, len(tc.excludedPrefixes), len(v.ExcludedPrefixes))
}
var i int
var eprefix string
for i, eprefix = range tc.excludedPrefixes {
if eprefix != v.ExcludedPrefixes[i].Prefix {
mismatch = true
break
}
}
if mismatch {
t.Fatalf("Test %d: Expected excluded prefix %s but got %s", i+1, tc.excludedPrefixes[i], v.ExcludedPrefixes[i].Prefix)
}
}
if tc.excludeFolders != v.ExcludeFolders {
t.Fatalf("Test %d: Expected ExcludeFoldersr=%v but got %v", i+1, tc.excludeFolders, v.ExcludeFolders)
}
}
}
}
func TestMarshalXML(t *testing.T) {
// Validates if Versioning with no excluded prefixes omits
// ExcludedPrefixes tags
v := Versioning{
Status: Enabled,
}
buf, err := xml.Marshal(v)
if err != nil {
t.Fatalf("Failed to marshal %v: %v", v, err)
}
str := string(buf)
if strings.Contains(str, "ExcludedPrefixes") {
t.Fatalf("XML shouldn't contain ExcludedPrefixes tag - %s", str)
}
}
func TestVersioningZero(t *testing.T) {
var v Versioning
if v.Enabled() {
t.Fatalf("Expected to be disabled but got enabled")
}
if v.Suspended() {
t.Fatalf("Expected to be disabled but got suspended")
}
}
func TestExcludeFolders(t *testing.T) {
v := Versioning{
Status: Enabled,
ExcludeFolders: true,
}
testPrefixes := []string{"jobs/output/_temporary/", "jobs/output/", "jobs/"}
for i, prefix := range testPrefixes {
if v.PrefixEnabled(prefix) || !v.PrefixSuspended(prefix) {
t.Fatalf("Test %d: Expected versioning to be excluded for %s", i+1, prefix)
}
}
// Test applicability for regular objects
if prefix := "prefix-1/obj-1"; !v.PrefixEnabled(prefix) || v.PrefixSuspended(prefix) {
t.Fatalf("Expected versioning to be enabled for %s", prefix)
}
// Test when ExcludeFolders is disabled
v.ExcludeFolders = false
for i, prefix := range testPrefixes {
if !v.PrefixEnabled(prefix) || v.PrefixSuspended(prefix) {
t.Fatalf("Test %d: Expected versioning to be enabled for %s", i+1, prefix)
}
}
}
func TestExcludedPrefixesMatch(t *testing.T) {
v := Versioning{
Status: Enabled,
ExcludedPrefixes: []ExcludedPrefix{{"*/_temporary/"}},
}
if err := v.Validate(); err != nil {
t.Fatalf("Invalid test versioning config %v: %v", v, err)
}
tests := []struct {
prefix string
excluded bool
}{
{
prefix: "app1-jobs/output/_temporary/attempt1/data.csv",
excluded: true,
},
{
prefix: "app1-jobs/output/final/attempt1/data.csv",
excluded: false,
},
}
for i, test := range tests {
if v.PrefixSuspended(test.prefix) != test.excluded {
if test.excluded {
t.Fatalf("Test %d: Expected prefix %s to be excluded from versioning", i+1, test.prefix)
} else {
t.Fatalf("Test %d: Expected prefix %s to have versioning enabled", i+1, test.prefix)
}
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
)
// BucketQuotaSys - map of bucket and quota configuration.
type BucketQuotaSys struct {
bucketStorageCache timedValue
}
// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
return qCfg, err
}
// NewBucketQuotaSys returns initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
return &BucketQuotaSys{}
}
// Init initialize bucket quota.
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
sys.bucketStorageCache.Once.Do(func() {
		// Set this to 10 secs since it's enough, as the scanner
// does not update the bucket usage values frequently.
sys.bucketStorageCache.TTL = 10 * time.Second
// Rely on older value if usage loading fails from disk.
sys.bucketStorageCache.Relax = true
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 1*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})
}
// GetBucketUsageInfo return bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
v, err := sys.bucketStorageCache.Get()
if err != nil && v != nil {
logger.LogIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err))
}
	if v == nil {
		logger.LogIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced", bucket))
	}
var bui BucketUsageInfo
dui, ok := v.(DataUsageInfo)
if ok {
bui = dui.BucketsUsage[bucket]
}
return bui, nil
}
// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
quotaCfg = &madmin.BucketQuota{}
if err = json.Unmarshal(data, quotaCfg); err != nil {
return quotaCfg, err
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return
}
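// enforceQuotaHard rejects a write of `size` bytes when the bucket has a hard
// quota configured and either the object alone, or the current bucket usage
// plus the object, would reach the configured quota.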
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}
q, err := sys.Get(ctx, bucket)
if err != nil {
return err
}
if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
if uint64(size) >= q.Quota { // check if file size already exceeds the quota
return BucketQuotaExceeded{Bucket: bucket}
}
bui, err := sys.GetBucketUsageInfo(bucket)
if err != nil {
return err
}
if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
return BucketQuotaExceeded{Bucket: bucket}
}
}
return nil
}
func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
if globalBucketQuotaSys == nil {
return nil
}
return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bufio"
"bytes"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"testing"
"github.com/minio/minio/internal/dsync"
)
func BenchmarkLockArgs(b *testing.B) {
args := dsync.LockArgs{
Owner: "minio",
UID: "uid",
Source: "lockArgs.go",
Quorum: 3,
Resources: []string{"obj.txt"},
}
argBytes, err := args.MarshalMsg(nil)
if err != nil {
b.Fatal(err)
}
req := &http.Request{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = io.NopCloser(bytes.NewReader(argBytes))
getLockArgs(req)
}
}
func BenchmarkLockArgsOld(b *testing.B) {
values := url.Values{}
values.Set("owner", "minio")
values.Set("uid", "uid")
values.Set("source", "lockArgs.go")
values.Set("quorum", "3")
req := &http.Request{
Form: values,
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = io.NopCloser(bytes.NewReader([]byte(`obj.txt`)))
getLockArgsOld(req)
}
}
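// getLockArgsOld parses dsync lock arguments from URL form values and reads the
// newline-separated resource list from the request body. It is kept only as the
// legacy decoder for the benchmark comparison above.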
func getLockArgsOld(r *http.Request) (args dsync.LockArgs, err error) {
values := r.Form
quorum, err := strconv.Atoi(values.Get("quorum"))
if err != nil {
return args, err
}
args = dsync.LockArgs{
		Owner:  values.Get("owner"),
UID: values.Get("uid"),
Source: values.Get("source"),
Quorum: quorum,
}
var resources []string
bio := bufio.NewScanner(r.Body)
for bio.Scan() {
resources = append(resources, bio.Text())
}
if err := bio.Err(); err != nil {
return args, err
}
sort.Strings(resources)
args.Resources = resources
return args, nil
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"runtime/debug"
"sort"
"sync"
"time"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
)
// a bucketMetacache keeps track of all caches generated
// for a bucket.
type bucketMetacache struct {
// Name of bucket
bucket string
// caches indexed by id.
caches map[string]metacache
// cache ids indexed by root paths
cachesRoot map[string][]string `msg:"-"`
// Internal state
mu sync.RWMutex `msg:"-"`
updated bool `msg:"-"`
}
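// deleteAllStorager is implemented by object layers that can recursively
// delete every object under a given prefix.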
type deleteAllStorager interface {
deleteAll(ctx context.Context, bucket, prefix string)
}
// newBucketMetacache creates a new bucketMetacache.
// Optionally remove all existing caches.
func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
if cleanup {
// Recursively delete all caches.
objAPI := newObjectLayerFn()
if objAPI != nil {
ez, ok := objAPI.(deleteAllStorager)
if ok {
ctx := context.Background()
ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(bucket, slashSeparator))
}
}
}
return &bucketMetacache{
bucket: bucket,
caches: make(map[string]metacache, 10),
cachesRoot: make(map[string][]string, 10),
}
}
func (b *bucketMetacache) debugf(format string, data ...interface{}) {
if serverDebugLog {
console.Debugf(format+"\n", data...)
}
}
// findCache will attempt to find a matching cache for the provided options.
// If a cache with the same ID exists already it will be returned.
// If none can be found, a new one is created with the provided ID.
func (b *bucketMetacache) findCache(o listPathOptions) metacache {
if b == nil {
logger.Info("bucketMetacache.findCache: nil cache for bucket %s", o.Bucket)
return metacache{}
}
if o.Bucket != b.bucket {
logger.Info("bucketMetacache.findCache: bucket %s does not match this bucket %s", o.Bucket, b.bucket)
debug.PrintStack()
return metacache{}
}
// Grab a write lock, since we create one if we cannot find one.
b.mu.Lock()
defer b.mu.Unlock()
// Check if exists already.
if c, ok := b.caches[o.ID]; ok {
c.lastHandout = time.Now()
b.caches[o.ID] = c
b.debugf("returning existing %v", o.ID)
return c
}
if !o.Create {
return metacache{
id: o.ID,
bucket: o.Bucket,
status: scanStateNone,
}
}
// Create new and add.
best := o.newMetacache()
b.caches[o.ID] = best
b.cachesRoot[best.root] = append(b.cachesRoot[best.root], best.id)
b.updated = true
b.debugf("returning new cache %s, bucket: %v", best.id, best.bucket)
return best
}
// cleanup removes redundant and outdated entries.
func (b *bucketMetacache) cleanup() {
// Entries to remove.
remove := make(map[string]struct{})
// Test on a copy
// cleanup is the only one deleting caches.
caches, _ := b.cloneCaches()
for id, cache := range caches {
if !cache.worthKeeping() {
b.debugf("cache %s not worth keeping", id)
remove[id] = struct{}{}
continue
}
if cache.id != id {
logger.Info("cache ID mismatch %s != %s", id, cache.id)
remove[id] = struct{}{}
continue
}
if cache.bucket != b.bucket {
logger.Info("cache bucket mismatch %s != %s", b.bucket, cache.bucket)
remove[id] = struct{}{}
continue
}
}
// If above limit, remove the caches with the oldest handout time.
if len(caches)-len(remove) > metacacheMaxEntries {
remainCaches := make([]metacache, 0, len(caches)-len(remove))
for id, cache := range caches {
if _, ok := remove[id]; ok {
continue
}
remainCaches = append(remainCaches, cache)
}
if len(remainCaches) > metacacheMaxEntries {
// Sort oldest last...
sort.Slice(remainCaches, func(i, j int) bool {
return remainCaches[i].lastHandout.Before(remainCaches[j].lastHandout)
})
// Keep first metacacheMaxEntries...
for _, cache := range remainCaches[metacacheMaxEntries:] {
if time.Since(cache.lastHandout) > metacacheMaxClientWait {
remove[cache.id] = struct{}{}
}
}
}
}
for id := range remove {
b.deleteCache(id)
}
}
// updateCacheEntry will update a cache.
// Returns the updated status.
func (b *bucketMetacache) updateCacheEntry(update metacache) (metacache, error) {
b.mu.Lock()
defer b.mu.Unlock()
existing, ok := b.caches[update.id]
if !ok {
return update, errFileNotFound
}
existing.update(update)
b.caches[update.id] = existing
b.updated = true
return existing, nil
}
// cloneCaches will return a clone of all current caches.
func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]string) {
b.mu.RLock()
defer b.mu.RUnlock()
dst := make(map[string]metacache, len(b.caches))
for k, v := range b.caches {
dst[k] = v
}
// Copy indexes
dst2 := make(map[string][]string, len(b.cachesRoot))
for k, v := range b.cachesRoot {
tmp := make([]string, len(v))
copy(tmp, v)
dst2[k] = tmp
}
return dst, dst2
}
// deleteAll will delete all on disk data for ALL caches.
// Deletes are performed concurrently.
func (b *bucketMetacache) deleteAll() {
ctx := context.Background()
objAPI := newObjectLayerFn()
if objAPI == nil {
return
}
ez, ok := objAPI.(deleteAllStorager)
if !ok {
logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'"))
return
}
b.mu.Lock()
defer b.mu.Unlock()
b.updated = true
// Delete all.
ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(b.bucket, slashSeparator))
b.caches = make(map[string]metacache, 10)
b.cachesRoot = make(map[string][]string, 10)
}
// deleteCache will delete a specific cache and all files related to it across the cluster.
func (b *bucketMetacache) deleteCache(id string) {
b.mu.Lock()
c, ok := b.caches[id]
if ok {
// Delete from root map.
list := b.cachesRoot[c.root]
for i, lid := range list {
if id == lid {
list = append(list[:i], list[i+1:]...)
break
}
}
b.cachesRoot[c.root] = list
delete(b.caches, id)
b.updated = true
}
b.mu.Unlock()
if ok {
c.delete(context.Background())
}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package rest
import (
"net/http"
"net/http/httptrace"
"sync/atomic"
"time"
)
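// globalStats accumulates cumulative REST client error counts and TCP dial
// statistics; all fields are read and written atomically.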
var globalStats = struct {
errs uint64
tcpDialErrs uint64
tcpDialCount uint64
tcpDialTotalDur uint64
}{}
// RPCStats holds information about the TCP dial metrics and errors
type RPCStats struct {
Errs uint64
DialAvgDuration uint64
DialErrs uint64
}
// GetRPCStats returns RPC stats, including call errors and TCP dial metrics
func GetRPCStats() RPCStats {
s := RPCStats{
Errs: atomic.LoadUint64(&globalStats.errs),
DialErrs: atomic.LoadUint64(&globalStats.tcpDialErrs),
}
if v := atomic.LoadUint64(&globalStats.tcpDialCount); v > 0 {
s.DialAvgDuration = atomic.LoadUint64(&globalStats.tcpDialTotalDur) / v
}
return s
}
// setupReqStatsUpdate returns the request instrumented with a client trace and a
// function which updates the global stats related to TCP connections.
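// A minimal usage sketch (the caller owns the HTTP client):
//
//	req, done := setupReqStatsUpdate(req)
//	resp, err := client.Do(req)
//	done() // records the dial duration, or a dial error if the connect never completed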
func setupReqStatsUpdate(req *http.Request) (*http.Request, func()) {
var dialStart, dialEnd int64
trace := &httptrace.ClientTrace{
ConnectStart: func(network, addr string) {
atomic.StoreInt64(&dialStart, time.Now().UnixNano())
},
ConnectDone: func(network, addr string, err error) {
if err == nil {
atomic.StoreInt64(&dialEnd, time.Now().UnixNano())
}
},
}
return req.WithContext(httptrace.WithClientTrace(req.Context(), trace)), func() {
if ds := atomic.LoadInt64(&dialStart); ds > 0 {
if de := atomic.LoadInt64(&dialEnd); de == 0 {
atomic.AddUint64(&globalStats.tcpDialErrs, 1)
} else if de >= ds {
atomic.AddUint64(&globalStats.tcpDialCount, 1)
				atomic.AddUint64(&globalStats.tcpDialTotalDur, uint64(de-ds))
}
}
}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/pkg/ldap"
)
func (a adminAPIHandlers) addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxEConfigJSONSize bytes were sent, or the length is unknown
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
	// Require an opaque body content type so that the request body is not
	// interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
	password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = madmin.IdentityOpenIDSubSys
case madmin.LDAPIDPCfg:
subSys = madmin.IdentityLDAPSubSys
}
cfgName := mux.Vars(r)["name"]
cfgTarget := madmin.Default
if cfgName != "" {
cfgTarget = cfgName
if idpCfgType == madmin.LDAPIDPCfg && cfgName != madmin.Default {
// LDAP does not support multiple configurations. So cfgName must be
// empty or `madmin.Default`.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPNonDefaultConfigName), r.URL)
return
}
}
// Check that this is a valid Create vs Update API call.
s := globalServerConfig.Clone()
if apiErrCode := handleCreateUpdateValidation(s, subSys, cfgTarget, isUpdate); apiErrCode != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
cfgData := ""
{
tgtSuffix := ""
if cfgTarget != madmin.Default {
tgtSuffix = config.SubSystemSeparator + cfgTarget
}
cfgData = subSys + tgtSuffix + config.KvSpaceSeparator + string(reqBytes)
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic, err := cfg.ReadConfig(strings.NewReader(cfgData))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// IDP config is not dynamic. Sanity check.
if dynamic {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), "expected IDP configuration to be non-dynamic", r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write to the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, []byte(cfgData)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
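// handleCreateUpdateValidation checks that a create call does not refer to an
// already-configured target name and that an update call refers to one that is
// already configured.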
func handleCreateUpdateValidation(s config.Config, subSys, cfgTarget string, isUpdate bool) APIErrorCode {
if cfgTarget != madmin.Default {
// This cannot give an error at this point.
subSysTargets, _ := s.GetAvailableTargets(subSys)
subSysTargetsSet := set.CreateStringSet(subSysTargets...)
if isUpdate && !subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
if !isUpdate && subSysTargetsSet.Contains(cfgTarget) {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
return ErrNone
}
// For the default configuration name, since it will always be an available
// target, we need to check if a configuration value has been set previously
// to figure out if this is a valid create or update API call.
// This cannot really error (FIXME: improve the type for GetConfigInfo)
var cfgInfos []madmin.IDPCfgInfo
switch subSys {
case madmin.IdentityOpenIDSubSys:
cfgInfos, _ = globalIAMSys.OpenIDConfig.GetConfigInfo(s, cfgTarget)
case madmin.IdentityLDAPSubSys:
cfgInfos, _ = globalIAMSys.LDAPConfig.GetConfigInfo(s, cfgTarget)
}
if len(cfgInfos) > 0 && !isUpdate {
return ErrAdminConfigIDPCfgNameAlreadyExists
}
if len(cfgInfos) == 0 && isUpdate {
return ErrAdminConfigIDPCfgNameDoesNotExist
}
return ErrNone
}
// AddIdentityProviderCfg: adds a new IDP config for openid/ldap.
//
// PUT <admin-prefix>/idp-cfg/openid/dex1 -> create named config `dex1`
//
// PUT <admin-prefix>/idp-cfg/openid/_ -> create (default) named config `_`
func (a adminAPIHandlers) AddIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddIdentityProviderCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
a.addOrUpdateIDPHandler(ctx, w, r, false)
}
// UpdateIdentityProviderCfg: updates an existing IDP config for openid/ldap.
//
// PATCH <admin-prefix>/idp-cfg/openid/dex1 -> update named config `dex1`
//
// PATCH <admin-prefix>/idp-cfg/openid/_ -> update (default) named config `_`
func (a adminAPIHandlers) UpdateIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateIdentityProviderCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
a.addOrUpdateIDPHandler(ctx, w, r, true)
}
// ListIdentityProviderCfg:
//
// GET <admin-prefix>/idp-cfg/openid -> lists openid provider configs.
func (a adminAPIHandlers) ListIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListIdentityProviderCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
	password := cred.SecretKey
idpCfgType := mux.Vars(r)["type"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
var cfgList []madmin.IDPListItem
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.OpenIDConfig.GetConfigList(cfg)
case madmin.LDAPIDPCfg:
cfg := globalServerConfig.Clone()
cfgList, err = globalIAMSys.LDAPConfig.GetConfigList(cfg)
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(cfgList)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// GetIdentityProviderCfg:
//
// GET <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) GetIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetIdentityProviderCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
	password := cred.SecretKey
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfg := globalServerConfig.Clone()
var cfgInfos []madmin.IDPCfgInfo
var err error
switch idpCfgType {
case madmin.OpenidIDPCfg:
cfgInfos, err = globalIAMSys.OpenIDConfig.GetConfigInfo(cfg, cfgName)
case madmin.LDAPIDPCfg:
cfgInfos, err = globalIAMSys.LDAPConfig.GetConfigInfo(cfg, cfgName)
}
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) || errors.Is(err, cfgldap.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
res := madmin.IDPConfig{
Type: idpCfgType,
Name: cfgName,
Info: cfgInfos,
}
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// DeleteIdentityProviderCfg:
//
// DELETE <admin-prefix>/idp-cfg/openid/dex_test
func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteIdentityProviderCfg")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ConfigUpdateAdminAction)
if objectAPI == nil {
return
}
idpCfgType := mux.Vars(r)["type"]
cfgName := mux.Vars(r)["name"]
if !madmin.ValidIDPConfigTypes.Contains(idpCfgType) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigInvalidIDPType), r.URL)
return
}
cfgCopy := globalServerConfig.Clone()
var subSys string
switch idpCfgType {
case madmin.OpenidIDPCfg:
subSys = config.IdentityOpenIDSubSys
cfgInfos, err := globalIAMSys.OpenIDConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
if errors.Is(err, openid.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
case madmin.LDAPIDPCfg:
subSys = config.IdentityLDAPSubSys
cfgInfos, err := globalIAMSys.LDAPConfig.GetConfigInfo(cfgCopy, cfgName)
if err != nil {
			if errors.Is(err, cfgldap.ErrProviderConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
hasEnv := false
for _, ci := range cfgInfos {
if ci.IsCfg && ci.IsEnv {
hasEnv = true
break
}
}
if hasEnv {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigEnvOverridden), r.URL)
return
}
default:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfgKey := fmt.Sprintf("%s:%s", subSys, cfgName)
if cfgName == madmin.Default {
cfgKey = subSys
}
if err = cfg.DelKVS(cfgKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
// error message back to client (likely mc).
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigLDAPValidation),
validationErr.FormatError(), r.URL)
return
}
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
dynamic := config.SubSystemsDynamic.Contains(subSys)
if dynamic {
applyDynamic(ctx, objectAPI, cfg, subSys, r, w)
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"strings"
"testing"
"time"
)
func TestBootstrap(t *testing.T) {
// Bootstrap events exceed bootstrap messages limit
bsTracer := &bootstrapTracer{}
for i := 0; i < bootstrapMsgsLimit+10; i++ {
bsTracer.Record(fmt.Sprintf("msg-%d", i), 1)
}
traceInfos := bsTracer.Events()
if len(traceInfos) != bootstrapMsgsLimit {
t.Fatalf("Expected length of events %d but got %d", bootstrapMsgsLimit, len(traceInfos))
}
// Simulate the case where bootstrap events were updated a day ago
bsTracer.lastUpdate = time.Now().UTC().Add(-25 * time.Hour)
bsTracer.DropEvents()
if !bsTracer.Empty() {
t.Fatalf("Expected all bootstrap events to have been dropped, but found %d events", len(bsTracer.Events()))
}
// Fewer than 4K bootstrap events
for i := 0; i < 10; i++ {
bsTracer.Record(fmt.Sprintf("msg-%d", i), 1)
}
events := bsTracer.Events()
if len(events) != 10 {
t.Fatalf("Expected length of events %d but got %d", 10, len(events))
}
for i, traceInfo := range bsTracer.Events() {
msg := fmt.Sprintf("msg-%d", i)
if !strings.HasSuffix(traceInfo.Message, msg) {
t.Fatalf("Expected %s but got %s", msg, traceInfo.Message)
}
}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package subnet
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
xhttp "github.com/minio/minio/internal/http"
)
const (
respBodyLimit = 1 << 20 // 1 MiB
// LoggerWebhookName - subnet logger webhook target
LoggerWebhookName = "subnet"
)
// Upload uploads the given file content (payload) to the specified URL
func (c Config) Upload(reqURL string, filename string, payload []byte) (string, error) {
if !c.Registered() {
return "", errors.New("Deployment is not registered with SUBNET. Please register the deployment via 'mc license register ALIAS'")
}
var body bytes.Buffer
writer := multipart.NewWriter(&body)
part, e := writer.CreateFormFile("file", filename)
if e != nil {
return "", e
}
if _, e = part.Write(payload); e != nil {
return "", e
}
writer.Close()
r, e := http.NewRequest(http.MethodPost, reqURL, &body)
if e != nil {
return "", e
}
r.Header.Add("Content-Type", writer.FormDataContentType())
return c.submitPost(r)
}
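// submitPost attaches the SUBNET API key and deployment ID headers, performs
// the request with a 10 second timeout and returns the response body as a string.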
func (c Config) submitPost(r *http.Request) (string, error) {
configLock.RLock()
r.Header.Set(xhttp.SubnetAPIKey, c.APIKey)
configLock.RUnlock()
r.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID)
client := &http.Client{
Timeout: 10 * time.Second,
Transport: c.transport,
}
resp, err := client.Do(r)
if err != nil {
return "", err
}
defer xhttp.DrainBody(resp.Body)
respBytes, err := io.ReadAll(io.LimitReader(resp.Body, respBodyLimit))
if err != nil {
return "", err
}
respStr := string(respBytes)
if resp.StatusCode == http.StatusOK {
return respStr, nil
}
return respStr, fmt.Errorf("SUBNET request failed with code %d and error: %s", resp.StatusCode, respStr)
}
// Post submits 'payload' to the specified URL
func (c Config) Post(reqURL string, payload interface{}) (string, error) {
if !c.Registered() {
return "", errors.New("Deployment is not registered with SUBNET. Please register the deployment via 'mc license register ALIAS'")
}
body, err := json.Marshal(payload)
if err != nil {
return "", err
}
r, err := http.NewRequest(http.MethodPost, reqURL, bytes.NewReader(body))
if err != nil {
return "", err
}
r.Header.Set("Content-Type", "application/json")
return c.submitPost(r)
}
<file_sep>//go:build ignore
// +build ignore
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"flag"
"fmt"
"log"
"net/url"
"time"
"github.com/minio/minio-go/v7"
cr "github.com/minio/minio-go/v7/pkg/credentials"
)
var (
	// MinIO endpoint serving the STS API
stsEndpoint string
// token to use with AssumeRoleWithCustomToken
token string
// Role ARN to use
roleArn string
// Display credentials flag
displayCreds bool
// Credential expiry duration
expiryDuration time.Duration
// Bucket to list
bucketToList string
)
func init() {
flag.StringVar(&stsEndpoint, "sts-ep", "http://localhost:9000", "STS endpoint")
flag.StringVar(&token, "t", "", "Token to use with AssumeRoleWithCustomToken STS API (required)")
flag.StringVar(&roleArn, "r", "", "RoleARN to use with the request (required)")
flag.BoolVar(&displayCreds, "d", false, "Only show generated credentials")
flag.DurationVar(&expiryDuration, "e", 0, "Request a duration of validity for the generated credential")
flag.StringVar(&bucketToList, "b", "mybucket", "Bucket to list (defaults to mybucket)")
}
func main() {
flag.Parse()
if token == "" || roleArn == "" {
flag.PrintDefaults()
return
}
// The credentials package in minio-go provides an interface to call the
// AssumeRoleWithCustomToken STS API.
var opts []cr.CustomTokenOpt
if expiryDuration != 0 {
opts = append(opts, cr.CustomTokenValidityOpt(expiryDuration))
}
// Initialize
li, err := cr.NewCustomTokenCredentials(stsEndpoint, token, roleArn, opts...)
if err != nil {
log.Fatalf("Error initializing CustomToken Identity: %v", err)
}
v, err := li.Get()
if err != nil {
log.Fatalf("Error retrieving STS credentials: %v", err)
}
if displayCreds {
fmt.Println("Only displaying credentials:")
fmt.Println("AccessKeyID:", v.AccessKeyID)
fmt.Println("SecretAccessKey:", v.SecretAccessKey)
fmt.Println("SessionToken:", v.SessionToken)
return
}
// Use generated credentials to authenticate with MinIO server
stsEndpointURL, err := url.Parse(stsEndpoint)
if err != nil {
log.Fatalf("Error parsing sts endpoint: %v", err)
}
copts := &minio.Options{
Creds: li,
Secure: stsEndpointURL.Scheme == "https",
}
minioClient, err := minio.New(stsEndpointURL.Host, copts)
if err != nil {
log.Fatalf("Error initializing client: ", err)
}
	// Use the MinIO client object normally, like a regular client.
fmt.Printf("Calling list objects on bucket named `%s` with temp creds:\n===\n", bucketToList)
objCh := minioClient.ListObjects(context.Background(), bucketToList, minio.ListObjectsOptions{})
for obj := range objCh {
if obj.Err != nil {
log.Fatalf("Listing error: %v", obj.Err)
}
fmt.Printf("Key: %s\nSize: %d\nLast Modified: %s\n===\n", obj.Key, obj.Size, obj.LastModified)
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
)
const (
// IAM configuration directory.
iamConfigPrefix = minioConfigPrefix + "/iam"
// IAM users directory.
iamConfigUsersPrefix = iamConfigPrefix + "/users/"
// IAM service accounts directory.
iamConfigServiceAccountsPrefix = iamConfigPrefix + "/service-accounts/"
// IAM groups directory.
iamConfigGroupsPrefix = iamConfigPrefix + "/groups/"
// IAM policies directory.
iamConfigPoliciesPrefix = iamConfigPrefix + "/policies/"
// IAM sts directory.
iamConfigSTSPrefix = iamConfigPrefix + "/sts/"
// IAM Policy DB prefixes.
iamConfigPolicyDBPrefix = iamConfigPrefix + "/policydb/"
iamConfigPolicyDBUsersPrefix = iamConfigPolicyDBPrefix + "users/"
iamConfigPolicyDBSTSUsersPrefix = iamConfigPolicyDBPrefix + "sts-users/"
iamConfigPolicyDBServiceAccountsPrefix = iamConfigPolicyDBPrefix + "service-accounts/"
iamConfigPolicyDBGroupsPrefix = iamConfigPolicyDBPrefix + "groups/"
// IAM identity file which captures identity credentials.
iamIdentityFile = "identity.json"
	// IAM policy file which provides policies for each user.
iamPolicyFile = "policy.json"
// IAM group members file
iamGroupMembersFile = "members.json"
// IAM format file
iamFormatFile = "format.json"
iamFormatVersion1 = 1
minServiceAccountExpiry time.Duration = 15 * time.Minute
maxServiceAccountExpiry time.Duration = 365 * 24 * time.Hour
)
var errInvalidSvcAcctExpiration = errors.New("invalid service account expiration")
type iamFormat struct {
Version int `json:"version"`
}
func newIAMFormatVersion1() iamFormat {
return iamFormat{Version: iamFormatVersion1}
}
func getIAMFormatFilePath() string {
return iamConfigPrefix + SlashSeparator + iamFormatFile
}
func getUserIdentityPath(user string, userType IAMUserType) string {
var basePath string
switch userType {
case svcUser:
basePath = iamConfigServiceAccountsPrefix
case stsUser:
basePath = iamConfigSTSPrefix
default:
basePath = iamConfigUsersPrefix
}
return pathJoin(basePath, user, iamIdentityFile)
}
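// saveIAMFormat loads the IAM format file and, if it is missing or has a
// version older than 1, writes a fresh version-1 format file.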
func saveIAMFormat(ctx context.Context, store IAMStorageAPI) error {
bootstrapTrace("Load IAM format file")
var iamFmt iamFormat
path := getIAMFormatFilePath()
if err := store.loadIAMConfig(ctx, &iamFmt, path); err != nil {
switch err {
case errConfigNotFound:
// Need to migrate to V1.
default:
			// Failed to read the IAM format file for some other reason; return the error.
return err
}
}
if iamFmt.Version >= iamFormatVersion1 {
// Nothing to do.
return nil
}
bootstrapTrace("Write IAM format file")
// Save iam format to version 1.
if err := store.saveIAMConfig(ctx, newIAMFormatVersion1(), path); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil
}
func getGroupInfoPath(group string) string {
return pathJoin(iamConfigGroupsPrefix, group, iamGroupMembersFile)
}
func getPolicyDocPath(name string) string {
return pathJoin(iamConfigPoliciesPrefix, name, iamPolicyFile)
}
func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string {
if isGroup {
return pathJoin(iamConfigPolicyDBGroupsPrefix, name+".json")
}
switch userType {
case svcUser:
return pathJoin(iamConfigPolicyDBServiceAccountsPrefix, name+".json")
case stsUser:
return pathJoin(iamConfigPolicyDBSTSUsersPrefix, name+".json")
default:
return pathJoin(iamConfigPolicyDBUsersPrefix, name+".json")
}
}
// UserIdentity represents a user's secret key and their status
type UserIdentity struct {
Version int `json:"version"`
Credentials auth.Credentials `json:"credentials"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
func newUserIdentity(cred auth.Credentials) UserIdentity {
return UserIdentity{Version: 1, Credentials: cred, UpdatedAt: UTCNow()}
}
// GroupInfo contains info about a group
type GroupInfo struct {
Version int `json:"version"`
Status string `json:"status"`
Members []string `json:"members"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
func newGroupInfo(members []string) GroupInfo {
return GroupInfo{Version: 1, Status: statusEnabled, Members: members, UpdatedAt: UTCNow()}
}
// MappedPolicy represents a policy name mapped to a user or group
type MappedPolicy struct {
Version int `json:"version"`
Policies string `json:"policy"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
}
// converts a mapped policy into a slice of distinct policies
func (mp MappedPolicy) toSlice() []string {
var policies []string
for _, policy := range strings.Split(mp.Policies, ",") {
if strings.TrimSpace(policy) == "" {
continue
}
policies = append(policies, policy)
}
return policies
}
func (mp MappedPolicy) policySet() set.StringSet {
return set.CreateStringSet(mp.toSlice()...)
}
func newMappedPolicy(policy string) MappedPolicy {
return MappedPolicy{Version: 1, Policies: policy, UpdatedAt: UTCNow()}
}
// PolicyDoc represents an IAM policy with some metadata.
type PolicyDoc struct {
Version int `json:",omitempty"`
Policy iampolicy.Policy
CreateDate time.Time `json:",omitempty"`
UpdateDate time.Time `json:",omitempty"`
}
func newPolicyDoc(p iampolicy.Policy) PolicyDoc {
now := UTCNow().Round(time.Millisecond)
return PolicyDoc{
Version: 1,
Policy: p,
CreateDate: now,
UpdateDate: now,
}
}
// defaultPolicyDoc - used to wrap a default policy as PolicyDoc.
func defaultPolicyDoc(p iampolicy.Policy) PolicyDoc {
return PolicyDoc{
Version: 1,
Policy: p,
}
}
func (d *PolicyDoc) update(p iampolicy.Policy) {
now := UTCNow().Round(time.Millisecond)
d.UpdateDate = now
if d.CreateDate.IsZero() {
d.CreateDate = now
}
d.Policy = p
}
// parseJSON parses both the old and the new format for storing policy
// definitions.
//
// The on-disk format of policy definitions has changed (around early 12/2021)
// from iampolicy.Policy to PolicyDoc. To avoid a migration, loading supports
// both the old and the new formats.
func (d *PolicyDoc) parseJSON(data []byte) error {
json := jsoniter.ConfigCompatibleWithStandardLibrary
var doc PolicyDoc
err := json.Unmarshal(data, &doc)
if err != nil {
err2 := json.Unmarshal(data, &doc.Policy)
if err2 != nil {
// Just return the first error.
return err
}
d.Policy = doc.Policy
return nil
}
*d = doc
return nil
}
// key options
type options struct {
ttl int64 // expiry in seconds
}
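// iamWatchEvent describes a create or delete notification for a single IAM
// configuration key, as delivered by an iamStorageWatcher.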
type iamWatchEvent struct {
isCreated bool // !isCreated implies a delete event.
keyPath string
}
// iamCache contains in-memory cache of IAM data.
type iamCache struct {
updatedAt time.Time
// map of policy names to policy definitions
iamPolicyDocsMap map[string]PolicyDoc
// map of usernames to credentials
iamUsersMap map[string]UserIdentity
// map of group names to group info
iamGroupsMap map[string]GroupInfo
// map of user names to groups they are a member of
iamUserGroupMemberships map[string]set.StringSet
// map of usernames/temporary access keys to policy names
iamUserPolicyMap map[string]MappedPolicy
// map of group names to policy names
iamGroupPolicyMap map[string]MappedPolicy
}
func newIamCache() *iamCache {
return &iamCache{
iamPolicyDocsMap: map[string]PolicyDoc{},
iamUsersMap: map[string]UserIdentity{},
iamGroupsMap: map[string]GroupInfo{},
iamUserGroupMemberships: map[string]set.StringSet{},
iamUserPolicyMap: map[string]MappedPolicy{},
iamGroupPolicyMap: map[string]MappedPolicy{},
}
}
// buildUserGroupMemberships - builds the memberships map. IMPORTANT:
// Assumes that c.Lock is held by caller.
func (c *iamCache) buildUserGroupMemberships() {
for group, gi := range c.iamGroupsMap {
c.updateGroupMembershipsMap(group, &gi)
}
}
// updateGroupMembershipsMap - updates the memberships map for a
// group. IMPORTANT: Assumes c.Lock() is held by caller.
func (c *iamCache) updateGroupMembershipsMap(group string, gi *GroupInfo) {
if gi == nil {
return
}
for _, member := range gi.Members {
v := c.iamUserGroupMemberships[member]
if v == nil {
v = set.CreateStringSet(group)
} else {
v.Add(group)
}
c.iamUserGroupMemberships[member] = v
}
}
// removeGroupFromMembershipsMap - removes the group from every member
// in the cache. IMPORTANT: Assumes c.Lock() is held by caller.
func (c *iamCache) removeGroupFromMembershipsMap(group string) {
for member, groups := range c.iamUserGroupMemberships {
if !groups.Contains(group) {
continue
}
groups.Remove(group)
c.iamUserGroupMemberships[member] = groups
}
}
// policyDBGet - lower-level helper; does not take locks.
//
// If a group is passed, it returns policies associated with the group.
//
// If a user is passed, it returns policies of the user along with any groups
// that the server knows the user is a member of.
//
// In LDAP users mode, the server does not store any group membership
// information in IAM (i.e sys.iam*Map) - this info is stored only in the STS
// generated credentials. Thus we skip looking up group memberships, user map,
// and group map and check the appropriate policy maps directly.
func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([]string, time.Time, error) {
if isGroup {
if mode == MinIOUsersSysType {
g, ok := c.iamGroupsMap[name]
if !ok {
return nil, time.Time{}, errNoSuchGroup
}
// Group is disabled, so we return no policy - this
// ensures the request is denied.
if g.Status == statusDisabled {
return nil, time.Time{}, nil
}
}
return c.iamGroupPolicyMap[name].toSlice(), c.iamGroupPolicyMap[name].UpdatedAt, nil
}
// When looking for a user's policies, we also check if the user
	// and the groups they are a member of are enabled.
u, ok := c.iamUsersMap[name]
if ok {
if !u.Credentials.IsValid() {
return nil, time.Time{}, nil
}
}
mp := c.iamUserPolicyMap[name]
// returned policy could be empty
policies := mp.toSlice()
for _, group := range c.iamUserGroupMemberships[name].ToSlice() {
// Skip missing or disabled groups
gi, ok := c.iamGroupsMap[group]
if !ok || gi.Status == statusDisabled {
continue
}
policies = append(policies, c.iamGroupPolicyMap[group].toSlice()...)
}
return policies, mp.UpdatedAt, nil
}
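// updateUserWithClaims extracts the JWT claims from the credential's session
// token (when present), stores them on the credential and caches the user
// identity under the given key.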
func (c *iamCache) updateUserWithClaims(key string, u UserIdentity) error {
if u.Credentials.SessionToken != "" {
jwtClaims, err := extractJWTClaims(u)
if err != nil {
return err
}
u.Credentials.Claims = jwtClaims.Map()
}
c.iamUsersMap[key] = u
c.updatedAt = time.Now()
return nil
}
// IAMStorageAPI defines an interface for the IAM persistence layer
type IAMStorageAPI interface {
// The role of the read-write lock is to prevent go routines from
// concurrently reading and writing the IAM storage. The (r)lock()
// functions return the iamCache. The cache can be safely written to
// only when returned by `lock()`.
lock() *iamCache
unlock()
rlock() *iamCache
runlock()
getUsersSysType() UsersSysType
loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error
loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error
loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error
loadUsers(ctx context.Context, userType IAMUserType, m map[string]UserIdentity) error
loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error
loadGroups(ctx context.Context, m map[string]GroupInfo) error
loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error
loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error
saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
loadIAMConfig(ctx context.Context, item interface{}, path string) error
deleteIAMConfig(ctx context.Context, path string) error
savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error
saveGroupInfo(ctx context.Context, group string, gi GroupInfo) error
deletePolicyDoc(ctx context.Context, policyName string) error
deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error
deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error
deleteGroupInfo(ctx context.Context, name string) error
}
// iamStorageWatcher is implemented by `IAMStorageAPI` implementers that
// additionally support watching storage for changes.
type iamStorageWatcher interface {
watch(ctx context.Context, keyPath string) <-chan iamWatchEvent
}
// Set default canned policies only if not already overridden by users.
func setDefaultCannedPolicies(policies map[string]PolicyDoc) {
for _, v := range iampolicy.DefaultPolicies {
if _, ok := policies[v.Name]; !ok {
policies[v.Name] = defaultPolicyDoc(v.Definition)
}
}
}
// LoadIAMCache reads all IAM items and populates a new iamCache object and
// replaces the in-memory cache object.
func (store *IAMStoreSys) LoadIAMCache(ctx context.Context) error {
bootstrapTrace("loading IAM data")
newCache := newIamCache()
loadedAt := time.Now()
if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok {
err := iamOS.loadAllFromObjStore(ctx, newCache)
if err != nil {
return err
}
} else {
bootstrapTrace("loading policy documents")
if err := store.loadPolicyDocs(ctx, newCache.iamPolicyDocsMap); err != nil {
return err
}
// Sets default canned policies, if none are set.
setDefaultCannedPolicies(newCache.iamPolicyDocsMap)
if store.getUsersSysType() == MinIOUsersSysType {
bootstrapTrace("loading regular users")
if err := store.loadUsers(ctx, regUser, newCache.iamUsersMap); err != nil {
return err
}
bootstrapTrace("loading regular groups")
if err := store.loadGroups(ctx, newCache.iamGroupsMap); err != nil {
return err
}
}
bootstrapTrace("loading user policy mapping")
		// load policies mapped to users
if err := store.loadMappedPolicies(ctx, regUser, false, newCache.iamUserPolicyMap); err != nil {
return err
}
bootstrapTrace("loading group policy mapping")
// load policies mapped to groups
if err := store.loadMappedPolicies(ctx, regUser, true, newCache.iamGroupPolicyMap); err != nil {
return err
}
bootstrapTrace("loading service accounts")
// load service accounts
if err := store.loadUsers(ctx, svcUser, newCache.iamUsersMap); err != nil {
return err
}
bootstrapTrace("loading STS users")
// load STS temp users
if err := store.loadUsers(ctx, stsUser, newCache.iamUsersMap); err != nil {
return err
}
bootstrapTrace("loading STS policy mapping")
// load STS policy mappings
if err := store.loadMappedPolicies(ctx, stsUser, false, newCache.iamUserPolicyMap); err != nil {
return err
}
newCache.buildUserGroupMemberships()
}
cache := store.lock()
defer store.unlock()
// We should only update the in-memory cache if there were no changes
// to the in-memory cache since the disk loading began. If there
// were changes to the in-memory cache we should wait for the next
// cycle until we can safely update the in-memory cache.
//
// An in-memory cache must be replaced only if we know for sure that
// the values loaded from disk are not stale. They might be stale
	// if cache.updatedAt is more recent than the time the refresh cycle began.
if cache.updatedAt.Before(loadedAt) {
// No one has updated anything since the config was loaded,
// so we just replace whatever is on the disk into memory.
cache.iamGroupPolicyMap = newCache.iamGroupPolicyMap
cache.iamGroupsMap = newCache.iamGroupsMap
cache.iamPolicyDocsMap = newCache.iamPolicyDocsMap
cache.iamUserGroupMemberships = newCache.iamUserGroupMemberships
cache.iamUserPolicyMap = newCache.iamUserPolicyMap
cache.iamUsersMap = newCache.iamUsersMap
cache.updatedAt = time.Now()
}
return nil
}
// IAMStoreSys contains IAMStorageAPI to add higher-level methods on the storage
// layer.
type IAMStoreSys struct {
IAMStorageAPI
}
// HasWatcher - returns if the storage system has a watcher.
func (store *IAMStoreSys) HasWatcher() bool {
_, ok := store.IAMStorageAPI.(iamStorageWatcher)
return ok
}
// GetUser - fetches credential from memory.
func (store *IAMStoreSys) GetUser(user string) (UserIdentity, bool) {
cache := store.rlock()
defer store.runlock()
u, ok := cache.iamUsersMap[user]
return u, ok
}
// GetMappedPolicy - fetches mapped policy from memory.
func (store *IAMStoreSys) GetMappedPolicy(name string, isGroup bool) (MappedPolicy, bool) {
cache := store.rlock()
defer store.runlock()
if isGroup {
v, ok := cache.iamGroupPolicyMap[name]
return v, ok
}
v, ok := cache.iamUserPolicyMap[name]
return v, ok
}
// GroupNotificationHandler - updates in-memory cache on notification of
// change (e.g. peer notification for object storage and etcd watch
// notification).
func (store *IAMStoreSys) GroupNotificationHandler(ctx context.Context, group string) error {
cache := store.lock()
defer store.unlock()
err := store.loadGroup(ctx, group, cache.iamGroupsMap)
if err != nil && err != errNoSuchGroup {
return err
}
if err == errNoSuchGroup {
// group does not exist - so remove from memory.
cache.removeGroupFromMembershipsMap(group)
delete(cache.iamGroupsMap, group)
delete(cache.iamGroupPolicyMap, group)
cache.updatedAt = time.Now()
return nil
}
gi := cache.iamGroupsMap[group]
// Updating the group memberships cache happens in two steps:
//
// 1. Remove the group from each user's list of memberships.
// 2. Add the group to each member's list of memberships.
//
// This ensures that regardless of members being added or
// removed, the cache stays current.
cache.removeGroupFromMembershipsMap(group)
cache.updateGroupMembershipsMap(group, &gi)
cache.updatedAt = time.Now()
return nil
}
// PolicyDBGet - fetches policies associated with the given user or group, and
// additional groups if provided.
func (store *IAMStoreSys) PolicyDBGet(name string, isGroup bool, groups ...string) ([]string, error) {
if name == "" {
return nil, errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
policies, _, err := cache.policyDBGet(store.getUsersSysType(), name, isGroup)
if err != nil {
return nil, err
}
if !isGroup {
for _, group := range groups {
ps, _, err := cache.policyDBGet(store.getUsersSysType(), group, true)
if err != nil {
return nil, err
}
policies = append(policies, ps...)
}
}
return policies, nil
}
// AddUsersToGroup - adds users to group, creating the group if needed.
func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, members []string) (updatedAt time.Time, err error) {
if group == "" {
return updatedAt, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
// Validate that all members exist.
for _, member := range members {
u, ok := cache.iamUsersMap[member]
if !ok {
return updatedAt, errNoSuchUser
}
cr := u.Credentials
if cr.IsTemp() || cr.IsServiceAccount() {
return updatedAt, errIAMActionNotAllowed
}
}
gi, ok := cache.iamGroupsMap[group]
if !ok {
// Set group as enabled by default when it doesn't
// exist.
gi = newGroupInfo(members)
} else {
gi.Members = set.CreateStringSet(append(gi.Members, members...)...).ToSlice()
gi.UpdatedAt = UTCNow()
}
if err := store.saveGroupInfo(ctx, group, gi); err != nil {
return updatedAt, err
}
cache.iamGroupsMap[group] = gi
// update user-group membership map
for _, member := range members {
gset := cache.iamUserGroupMemberships[member]
if gset == nil {
gset = set.CreateStringSet(group)
} else {
gset.Add(group)
}
cache.iamUserGroupMemberships[member] = gset
}
cache.updatedAt = time.Now()
return gi.UpdatedAt, nil
}
// helper function - does not take any locks. Updates only cache if
// updateCacheOnly is set.
func removeMembersFromGroup(ctx context.Context, store *IAMStoreSys, cache *iamCache, group string, members []string, updateCacheOnly bool) (updatedAt time.Time, err error) {
gi, ok := cache.iamGroupsMap[group]
if !ok {
return updatedAt, errNoSuchGroup
}
s := set.CreateStringSet(gi.Members...)
d := set.CreateStringSet(members...)
gi.Members = s.Difference(d).ToSlice()
if !updateCacheOnly {
err := store.saveGroupInfo(ctx, group, gi)
if err != nil {
return updatedAt, err
}
}
gi.UpdatedAt = UTCNow()
cache.iamGroupsMap[group] = gi
// update user-group membership map
for _, member := range members {
gset := cache.iamUserGroupMemberships[member]
if gset == nil {
continue
}
gset.Remove(group)
cache.iamUserGroupMemberships[member] = gset
}
cache.updatedAt = time.Now()
return gi.UpdatedAt, nil
}
// RemoveUsersFromGroup - removes users from group, deleting it if it is empty.
func (store *IAMStoreSys) RemoveUsersFromGroup(ctx context.Context, group string, members []string) (updatedAt time.Time, err error) {
if group == "" {
return updatedAt, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
// Validate that all members exist.
for _, member := range members {
u, ok := cache.iamUsersMap[member]
if !ok {
return updatedAt, errNoSuchUser
}
cr := u.Credentials
if cr.IsTemp() || cr.IsServiceAccount() {
return updatedAt, errIAMActionNotAllowed
}
}
gi, ok := cache.iamGroupsMap[group]
if !ok {
return updatedAt, errNoSuchGroup
}
// Check if attempting to delete a non-empty group.
if len(members) == 0 && len(gi.Members) != 0 {
return updatedAt, errGroupNotEmpty
}
if len(members) == 0 {
// len(gi.Members) == 0 here.
// Remove the group from storage. First delete the
// mapped policy. No-mapped-policy case is ignored.
if err := store.deleteMappedPolicy(ctx, group, regUser, true); err != nil && !errors.Is(err, errNoSuchPolicy) {
return updatedAt, err
}
if err := store.deleteGroupInfo(ctx, group); err != nil && err != errNoSuchGroup {
return updatedAt, err
}
// Delete from server memory
delete(cache.iamGroupsMap, group)
delete(cache.iamGroupPolicyMap, group)
cache.updatedAt = time.Now()
return cache.updatedAt, nil
}
return removeMembersFromGroup(ctx, store, cache, group, members, false)
}
// SetGroupStatus - updates group status
func (store *IAMStoreSys) SetGroupStatus(ctx context.Context, group string, enabled bool) (updatedAt time.Time, err error) {
if group == "" {
return updatedAt, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
gi, ok := cache.iamGroupsMap[group]
if !ok {
return updatedAt, errNoSuchGroup
}
if enabled {
gi.Status = statusEnabled
} else {
gi.Status = statusDisabled
}
gi.UpdatedAt = UTCNow()
if err := store.saveGroupInfo(ctx, group, gi); err != nil {
return gi.UpdatedAt, err
}
cache.iamGroupsMap[group] = gi
cache.updatedAt = time.Now()
return gi.UpdatedAt, nil
}
// GetGroupDescription - builds up group description
func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err error) {
cache := store.rlock()
defer store.runlock()
ps, updatedAt, err := cache.policyDBGet(store.getUsersSysType(), group, true)
if err != nil {
return gd, err
}
policy := strings.Join(ps, ",")
if store.getUsersSysType() != MinIOUsersSysType {
return madmin.GroupDesc{
Name: group,
Policy: policy,
UpdatedAt: updatedAt,
}, nil
}
gi, ok := cache.iamGroupsMap[group]
if !ok {
return gd, errNoSuchGroup
}
return madmin.GroupDesc{
Name: group,
Status: gi.Status,
Members: gi.Members,
Policy: policy,
UpdatedAt: gi.UpdatedAt,
}, nil
}
// ListGroups - lists groups. Since this is not going to be a frequent
// operation, we fetch this info from storage, and refresh the cache as well.
func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
cache := store.lock()
defer store.unlock()
if store.getUsersSysType() == MinIOUsersSysType {
m := map[string]GroupInfo{}
err = store.loadGroups(ctx, m)
if err != nil {
return
}
cache.iamGroupsMap = m
cache.updatedAt = time.Now()
for k := range cache.iamGroupsMap {
res = append(res, k)
}
}
if store.getUsersSysType() == LDAPUsersSysType {
m := map[string]MappedPolicy{}
err = store.loadMappedPolicies(ctx, stsUser, true, m)
if err != nil {
return
}
cache.iamGroupPolicyMap = m
cache.updatedAt = time.Now()
for k := range cache.iamGroupPolicyMap {
res = append(res, k)
}
}
return
}
// listGroups - lists groups - fetch groups from cache
func (store *IAMStoreSys) listGroups(ctx context.Context) (res []string, err error) {
cache := store.rlock()
defer store.runlock()
if store.getUsersSysType() == MinIOUsersSysType {
for k := range cache.iamGroupsMap {
res = append(res, k)
}
}
if store.getUsersSysType() == LDAPUsersSysType {
for k := range cache.iamGroupPolicyMap {
res = append(res, k)
}
}
return
}
// PolicyDBUpdate - adds or removes given policies to/from the user or group's
// policy associations.
func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGroup bool,
userType IAMUserType, policies []string, isAttach bool) (updatedAt time.Time,
addedOrRemoved, effectivePolicies []string, err error,
) {
if name == "" {
err = errInvalidArgument
return
}
cache := store.lock()
defer store.unlock()
// Load existing policy mapping
var mp MappedPolicy
if !isGroup {
mp = cache.iamUserPolicyMap[name]
} else {
if store.getUsersSysType() == MinIOUsersSysType {
g, ok := cache.iamGroupsMap[name]
if !ok {
err = errNoSuchGroup
return
}
if g.Status == statusDisabled {
err = errGroupDisabled
return
}
}
mp = cache.iamGroupPolicyMap[name]
}
// Compute net policy change effect and updated policy mapping
existingPolicySet := mp.policySet()
policiesToUpdate := set.CreateStringSet(policies...)
var newPolicySet set.StringSet
newPolicyMapping := mp
if isAttach {
// new policies to attach => inputPolicies - existing (set difference)
policiesToUpdate = policiesToUpdate.Difference(existingPolicySet)
// validate that new policies to add are defined.
for _, p := range policiesToUpdate.ToSlice() {
if _, found := cache.iamPolicyDocsMap[p]; !found {
err = errNoSuchPolicy
return
}
}
newPolicySet = existingPolicySet.Union(policiesToUpdate)
} else {
// policies to detach => inputPolicies ∩ existing (intersection)
policiesToUpdate = policiesToUpdate.Intersection(existingPolicySet)
newPolicySet = existingPolicySet.Difference(policiesToUpdate)
}
// We return an error if the requested policy update will have no effect.
if policiesToUpdate.IsEmpty() {
err = errNoPolicyToAttachOrDetach
return
}
newPolicies := newPolicySet.ToSlice()
newPolicyMapping.Policies = strings.Join(newPolicies, ",")
newPolicyMapping.UpdatedAt = UTCNow()
addedOrRemoved = policiesToUpdate.ToSlice()
if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil {
return
}
if !isGroup {
cache.iamUserPolicyMap[name] = newPolicyMapping
} else {
cache.iamGroupPolicyMap[name] = newPolicyMapping
}
cache.updatedAt = UTCNow()
return cache.updatedAt, addedOrRemoved, newPolicies, nil
}
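// A rough illustrative sketch of the attach/detach set arithmetic used in
// PolicyDBUpdate above (the policy names are hypothetical):
//
//   existing := set.CreateStringSet("readonly", "diagnostics")
//   requested := set.CreateStringSet("readonly", "writeonly")
//
//   attach: requested.Difference(existing) => {"writeonly"}; each such new
//   policy must exist in iamPolicyDocsMap and the updated mapping becomes
//   the union of the two sets.
//   detach: requested.Intersection(existing) => {"readonly"}; the updated
//   mapping is the existing set minus {"readonly"}.
//
// If the computed change set is empty, errNoPolicyToAttachOrDetach is
// returned, as in the code above.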
// PolicyDBSet - update the policy mapping for the given user or group in
// storage and in cache. We do not check for the existence of the user here
// since users can be virtual, such as for:
// - LDAP users
// - CommonName for STS accounts generated by AssumeRoleWithCertificate
func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string, userType IAMUserType, isGroup bool) (updatedAt time.Time, err error) {
if name == "" {
return updatedAt, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
// Handle policy mapping removal.
if policy == "" {
if store.getUsersSysType() == LDAPUsersSysType {
// Add a fallback removal of previous content that may come back
// as a ghost user due to lack of delete; this change was
// introduced in PR #11840.
store.deleteMappedPolicy(ctx, name, regUser, false)
}
err := store.deleteMappedPolicy(ctx, name, userType, isGroup)
if err != nil && !errors.Is(err, errNoSuchPolicy) {
return updatedAt, err
}
if !isGroup {
delete(cache.iamUserPolicyMap, name)
} else {
delete(cache.iamGroupPolicyMap, name)
}
cache.updatedAt = time.Now()
return cache.updatedAt, nil
}
// Handle policy mapping set/update
mp := newMappedPolicy(policy)
for _, p := range mp.toSlice() {
if _, found := cache.iamPolicyDocsMap[p]; !found {
return updatedAt, errNoSuchPolicy
}
}
if err := store.saveMappedPolicy(ctx, name, userType, isGroup, mp); err != nil {
return updatedAt, err
}
if !isGroup {
cache.iamUserPolicyMap[name] = mp
} else {
cache.iamGroupPolicyMap[name] = mp
}
cache.updatedAt = time.Now()
return mp.UpdatedAt, nil
}
// PolicyNotificationHandler - loads given policy from storage. If not present,
// deletes from cache. This notification only reads from storage, and updates
// cache. When the notification is for a policy deletion, it updates the
// user-policy and group-policy maps as well.
func (store *IAMStoreSys) PolicyNotificationHandler(ctx context.Context, policy string) error {
if policy == "" {
return errInvalidArgument
}
cache := store.lock()
defer store.unlock()
err := store.loadPolicyDoc(ctx, policy, cache.iamPolicyDocsMap)
if errors.Is(err, errNoSuchPolicy) {
// policy was deleted, update cache.
delete(cache.iamPolicyDocsMap, policy)
// update user policy map
for u, mp := range cache.iamUserPolicyMap {
pset := mp.policySet()
if !pset.Contains(policy) {
continue
}
if store.getUsersSysType() == MinIOUsersSysType {
_, ok := cache.iamUsersMap[u]
if !ok {
// happens when account is deleted or
// expired.
delete(cache.iamUserPolicyMap, u)
continue
}
}
pset.Remove(policy)
cache.iamUserPolicyMap[u] = newMappedPolicy(strings.Join(pset.ToSlice(), ","))
}
// update group policy map
for g, mp := range cache.iamGroupPolicyMap {
pset := mp.policySet()
if !pset.Contains(policy) {
continue
}
pset.Remove(policy)
cache.iamGroupPolicyMap[g] = newMappedPolicy(strings.Join(pset.ToSlice(), ","))
}
cache.updatedAt = time.Now()
return nil
}
return err
}
// DeletePolicy - deletes policy from storage and cache.
func (store *IAMStoreSys) DeletePolicy(ctx context.Context, policy string) error {
if policy == "" {
return errInvalidArgument
}
cache := store.lock()
defer store.unlock()
// Check if policy is mapped to any existing user or group.
users := []string{}
groups := []string{}
for u, mp := range cache.iamUserPolicyMap {
pset := mp.policySet()
if store.getUsersSysType() == MinIOUsersSysType {
if _, ok := cache.iamUsersMap[u]; !ok {
// This case can happen when a temporary account is
// deleted or expired - remove it from userPolicyMap.
delete(cache.iamUserPolicyMap, u)
continue
}
}
if pset.Contains(policy) {
users = append(users, u)
}
}
for g, mp := range cache.iamGroupPolicyMap {
pset := mp.policySet()
if pset.Contains(policy) {
groups = append(groups, g)
}
}
if len(users) != 0 || len(groups) != 0 {
return errPolicyInUse
}
err := store.deletePolicyDoc(ctx, policy)
if errors.Is(err, errNoSuchPolicy) {
// Ignore error if policy is already deleted.
err = nil
}
if err != nil {
return err
}
delete(cache.iamPolicyDocsMap, policy)
cache.updatedAt = time.Now()
return nil
}
// GetPolicy - gets the policy definition. Allows specifying multiple comma
// separated policies - returns a combined policy.
func (store *IAMStoreSys) GetPolicy(name string) (iampolicy.Policy, error) {
if name == "" {
return iampolicy.Policy{}, errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
policies := newMappedPolicy(name).toSlice()
var combinedPolicy iampolicy.Policy
for _, policy := range policies {
if policy == "" {
continue
}
v, ok := cache.iamPolicyDocsMap[policy]
if !ok {
return v.Policy, errNoSuchPolicy
}
combinedPolicy = combinedPolicy.Merge(v.Policy)
}
return combinedPolicy, nil
}
// GetPolicyDoc - gets the policy doc which has the policy and some metadata.
// Exactly one policy must be specified here.
func (store *IAMStoreSys) GetPolicyDoc(name string) (r PolicyDoc, err error) {
name = strings.TrimSpace(name)
if name == "" {
return r, errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
v, ok := cache.iamPolicyDocsMap[name]
if !ok {
return r, errNoSuchPolicy
}
return v, nil
}
// SetPolicy - creates a policy with name.
func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iampolicy.Policy) (time.Time, error) {
if policy.IsEmpty() || name == "" {
return time.Time{}, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
var (
d PolicyDoc
ok bool
)
if d, ok = cache.iamPolicyDocsMap[name]; ok {
d.update(policy)
} else {
d = newPolicyDoc(policy)
}
if err := store.savePolicyDoc(ctx, name, d); err != nil {
return d.UpdateDate, err
}
cache.iamPolicyDocsMap[name] = d
cache.updatedAt = time.Now()
return d.UpdateDate, nil
}
// ListPolicies - fetches all policies from storage and updates cache as well.
// If bucketName is non-empty, returns policies matching the bucket.
func (store *IAMStoreSys) ListPolicies(ctx context.Context, bucketName string) (map[string]iampolicy.Policy, error) {
cache := store.lock()
defer store.unlock()
m := map[string]PolicyDoc{}
err := store.loadPolicyDocs(ctx, m)
if err != nil {
return nil, err
}
// Sets default canned policies
setDefaultCannedPolicies(m)
cache.iamPolicyDocsMap = m
cache.updatedAt = time.Now()
ret := map[string]iampolicy.Policy{}
for k, v := range m {
if bucketName == "" || v.Policy.MatchResource(bucketName) {
ret[k] = v.Policy
}
}
return ret, nil
}
// ListPolicyDocs - fetches all policy docs from storage and updates cache as well.
// If bucketName is non-empty, returns policy docs matching the bucket.
func (store *IAMStoreSys) ListPolicyDocs(ctx context.Context, bucketName string) (map[string]PolicyDoc, error) {
cache := store.lock()
defer store.unlock()
m := map[string]PolicyDoc{}
err := store.loadPolicyDocs(ctx, m)
if err != nil {
return nil, err
}
// Sets default canned policies
setDefaultCannedPolicies(m)
cache.iamPolicyDocsMap = m
cache.updatedAt = time.Now()
ret := map[string]PolicyDoc{}
for k, v := range m {
if bucketName == "" || v.Policy.MatchResource(bucketName) {
ret[k] = v
}
}
return ret, nil
}
// fetches all policy docs from cache.
// If bucketName is non-empty, returns policy docs matching the bucket.
func (store *IAMStoreSys) listPolicyDocs(ctx context.Context, bucketName string) (map[string]PolicyDoc, error) {
cache := store.rlock()
defer store.runlock()
ret := map[string]PolicyDoc{}
for k, v := range cache.iamPolicyDocsMap {
if bucketName == "" || v.Policy.MatchResource(bucketName) {
ret[k] = v
}
}
return ret, nil
}
// helper function - does not take locks.
func filterPolicies(cache *iamCache, policyName string, bucketName string) (string, iampolicy.Policy) {
var policies []string
mp := newMappedPolicy(policyName)
combinedPolicy := iampolicy.Policy{}
for _, policy := range mp.toSlice() {
if policy == "" {
continue
}
p, found := cache.iamPolicyDocsMap[policy]
if !found {
continue
}
if bucketName == "" || p.Policy.MatchResource(bucketName) {
policies = append(policies, policy)
combinedPolicy = combinedPolicy.Merge(p.Policy)
}
}
return strings.Join(policies, ","), combinedPolicy
}
// FilterPolicies - accepts a comma separated list of policy names as a string
// and bucket and returns only policies that currently exist in MinIO. If
// bucketName is non-empty, additionally filters policies matching the bucket.
// The first returned value is the list of currently existing policies, and the
// second is their combined policy definition.
func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (string, iampolicy.Policy) {
cache := store.rlock()
defer store.runlock()
return filterPolicies(cache, policyName, bucketName)
}
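// Illustrative example for FilterPolicies (policy names are hypothetical):
// if policyName is "readwrite,missing-policy" and only "readwrite" exists in
// the cache, the returned string is "readwrite" and the returned policy is
// the "readwrite" definition alone. With a non-empty bucketName, policies
// whose statements do not match the bucket's resources are filtered out too.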
// GetBucketUsers - returns users (not STS or service accounts) that have access
// to the bucket. User is included even if a group policy that grants access to
// the bucket is disabled.
func (store *IAMStoreSys) GetBucketUsers(bucket string) (map[string]madmin.UserInfo, error) {
if bucket == "" {
return nil, errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
result := map[string]madmin.UserInfo{}
for k, v := range cache.iamUsersMap {
c := v.Credentials
if c.IsTemp() || c.IsServiceAccount() {
continue
}
var policies []string
mp, ok := cache.iamUserPolicyMap[k]
if ok {
policies = append(policies, mp.Policies)
for _, group := range cache.iamUserGroupMemberships[k].ToSlice() {
if nmp, ok := cache.iamGroupPolicyMap[group]; ok {
policies = append(policies, nmp.Policies)
}
}
}
matchedPolicies, _ := filterPolicies(cache, strings.Join(policies, ","), bucket)
if len(matchedPolicies) > 0 {
result[k] = madmin.UserInfo{
PolicyName: matchedPolicies,
Status: func() madmin.AccountStatus {
if c.IsValid() {
return madmin.AccountEnabled
}
return madmin.AccountDisabled
}(),
MemberOf: cache.iamUserGroupMemberships[k].ToSlice(),
}
}
}
return result, nil
}
// GetUsers - returns all users (not STS or service accounts).
func (store *IAMStoreSys) GetUsers() map[string]madmin.UserInfo {
cache := store.rlock()
defer store.runlock()
result := map[string]madmin.UserInfo{}
for k, u := range cache.iamUsersMap {
v := u.Credentials
if v.IsTemp() || v.IsServiceAccount() {
continue
}
result[k] = madmin.UserInfo{
PolicyName: cache.iamUserPolicyMap[k].Policies,
Status: func() madmin.AccountStatus {
if v.IsValid() {
return madmin.AccountEnabled
}
return madmin.AccountDisabled
}(),
MemberOf: cache.iamUserGroupMemberships[k].ToSlice(),
UpdatedAt: cache.iamUserPolicyMap[k].UpdatedAt,
}
}
return result
}
// GetUsersWithMappedPolicies - safely returns the name of access keys with associated policies
func (store *IAMStoreSys) GetUsersWithMappedPolicies() map[string]string {
cache := store.rlock()
defer store.runlock()
result := make(map[string]string)
for k, v := range cache.iamUserPolicyMap {
result[k] = v.Policies
}
return result
}
// GetUserInfo - get info on a user.
func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error) {
if name == "" {
return u, errInvalidArgument
}
cache := store.rlock()
defer store.runlock()
if store.getUsersSysType() != MinIOUsersSysType {
// If the user has a mapped policy or is a member of a group, we
// return that info. Otherwise we return error.
var groups []string
for _, v := range cache.iamUsersMap {
if v.Credentials.ParentUser == name {
groups = v.Credentials.Groups
break
}
}
mappedPolicy, ok := cache.iamUserPolicyMap[name]
if !ok {
return u, errNoSuchUser
}
return madmin.UserInfo{
PolicyName: mappedPolicy.Policies,
MemberOf: groups,
UpdatedAt: mappedPolicy.UpdatedAt,
}, nil
}
ui, found := cache.iamUsersMap[name]
if !found {
return u, errNoSuchUser
}
cred := ui.Credentials
if cred.IsTemp() || cred.IsServiceAccount() {
return u, errIAMActionNotAllowed
}
return madmin.UserInfo{
PolicyName: cache.iamUserPolicyMap[name].Policies,
Status: func() madmin.AccountStatus {
if cred.IsValid() {
return madmin.AccountEnabled
}
return madmin.AccountDisabled
}(),
MemberOf: cache.iamUserGroupMemberships[name].ToSlice(),
UpdatedAt: cache.iamUserPolicyMap[name].UpdatedAt,
}, nil
}
// PolicyMappingNotificationHandler - handles updating a policy mapping from storage.
func (store *IAMStoreSys) PolicyMappingNotificationHandler(ctx context.Context, userOrGroup string, isGroup bool, userType IAMUserType) error {
if userOrGroup == "" {
return errInvalidArgument
}
cache := store.lock()
defer store.unlock()
m := cache.iamGroupPolicyMap
if !isGroup {
m = cache.iamUserPolicyMap
}
err := store.loadMappedPolicy(ctx, userOrGroup, userType, isGroup, m)
if errors.Is(err, errNoSuchPolicy) {
// This means that the policy mapping was deleted, so we update
// the cache.
delete(m, userOrGroup)
cache.updatedAt = time.Now()
err = nil
}
return err
}
// UserNotificationHandler - handles updating a user/STS account/service account
// from storage.
func (store *IAMStoreSys) UserNotificationHandler(ctx context.Context, accessKey string, userType IAMUserType) error {
if accessKey == "" {
return errInvalidArgument
}
cache := store.lock()
defer store.unlock()
err := store.loadUser(ctx, accessKey, userType, cache.iamUsersMap)
if err == errNoSuchUser {
// User was deleted - we update the cache.
delete(cache.iamUsersMap, accessKey)
// 1. Start with updating user-group memberships
if store.getUsersSysType() == MinIOUsersSysType {
memberOf := cache.iamUserGroupMemberships[accessKey].ToSlice()
for _, group := range memberOf {
_, removeErr := removeMembersFromGroup(ctx, store, cache, group, []string{accessKey}, true)
if removeErr == errNoSuchGroup {
removeErr = nil
}
if removeErr != nil {
return removeErr
}
}
}
// 2. Remove any derived credentials from memory
if userType == regUser {
for _, u := range cache.iamUsersMap {
if u.Credentials.IsServiceAccount() && u.Credentials.ParentUser == accessKey {
delete(cache.iamUsersMap, u.Credentials.AccessKey)
}
if u.Credentials.IsTemp() && u.Credentials.ParentUser == accessKey {
delete(cache.iamUsersMap, u.Credentials.AccessKey)
}
}
}
// 3. Delete any mapped policy
delete(cache.iamUserPolicyMap, accessKey)
cache.updatedAt = time.Now()
return nil
}
if err != nil {
return err
}
if userType != svcUser {
err = store.loadMappedPolicy(ctx, accessKey, userType, false, cache.iamUserPolicyMap)
// Ignore policy not mapped error
if err != nil && !errors.Is(err, errNoSuchPolicy) {
return err
}
}
// We are deliberately not persisting the policy map for the parent
// user. Although this is a hack, it is a good enough hack
// at this point in time - we need to overhaul our OIDC
// usage with service accounts with a cleaner implementation.
//
// This mapping is necessary to ensure that valid credentials
// have necessary ParentUser present - this is mainly for only
// webIdentity based STS tokens.
u, ok := cache.iamUsersMap[accessKey]
if ok {
cred := u.Credentials
if cred.IsTemp() && cred.ParentUser != "" && cred.ParentUser != globalActiveCred.AccessKey {
if _, ok := cache.iamUserPolicyMap[cred.ParentUser]; !ok {
cache.iamUserPolicyMap[cred.ParentUser] = cache.iamUserPolicyMap[accessKey]
cache.updatedAt = time.Now()
}
}
}
return nil
}
// DeleteUser - deletes a user from storage and cache. This is only used with
// long-term users and service accounts, not STS.
func (store *IAMStoreSys) DeleteUser(ctx context.Context, accessKey string, userType IAMUserType) error {
if accessKey == "" {
return errInvalidArgument
}
cache := store.lock()
defer store.unlock()
// first we remove the user from their groups.
if store.getUsersSysType() == MinIOUsersSysType && userType == regUser {
memberOf := cache.iamUserGroupMemberships[accessKey].ToSlice()
for _, group := range memberOf {
_, removeErr := removeMembersFromGroup(ctx, store, cache, group, []string{accessKey}, false)
if removeErr != nil {
return removeErr
}
}
}
// Now we can remove the user from memory and IAM store
// Delete any STS and service account derived from this credential
// first.
if userType == regUser {
for _, ui := range cache.iamUsersMap {
u := ui.Credentials
if u.ParentUser == accessKey {
switch {
case u.IsServiceAccount():
_ = store.deleteUserIdentity(ctx, u.AccessKey, svcUser)
delete(cache.iamUsersMap, u.AccessKey)
case u.IsTemp():
_ = store.deleteUserIdentity(ctx, u.AccessKey, stsUser)
delete(cache.iamUsersMap, u.AccessKey)
}
}
}
}
// It is ok to ignore deletion error on the mapped policy
store.deleteMappedPolicy(ctx, accessKey, userType, false)
delete(cache.iamUserPolicyMap, accessKey)
err := store.deleteUserIdentity(ctx, accessKey, userType)
if err == errNoSuchUser {
// ignore if user is already deleted.
err = nil
}
delete(cache.iamUsersMap, accessKey)
cache.updatedAt = time.Now()
return err
}
// SetTempUser - saves temporary (STS) credential to storage and cache. If a
// policy name is given, it is associated with the parent user specified in the
// credential.
func (store *IAMStoreSys) SetTempUser(ctx context.Context, accessKey string, cred auth.Credentials, policyName string) (time.Time, error) {
if accessKey == "" || !cred.IsTemp() || cred.IsExpired() || cred.ParentUser == "" {
return time.Time{}, errInvalidArgument
}
ttl := int64(cred.Expiration.Sub(UTCNow()).Seconds())
cache := store.lock()
defer store.unlock()
if policyName != "" {
mp := newMappedPolicy(policyName)
_, combinedPolicyStmt := filterPolicies(cache, mp.Policies, "")
if combinedPolicyStmt.IsEmpty() {
return time.Time{}, fmt.Errorf("specified policy %s, not found %w", policyName, errNoSuchPolicy)
}
err := store.saveMappedPolicy(ctx, cred.ParentUser, stsUser, false, mp, options{ttl: ttl})
if err != nil {
return time.Time{}, err
}
cache.iamUserPolicyMap[cred.ParentUser] = mp
}
u := newUserIdentity(cred)
err := store.saveUserIdentity(ctx, accessKey, stsUser, u, options{ttl: ttl})
if err != nil {
return time.Time{}, err
}
cache.iamUsersMap[accessKey] = u
cache.updatedAt = time.Now()
return u.UpdatedAt, nil
}
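// Note (informal sketch): the ttl computed above is the number of seconds
// until cred.Expiration, so the mapped policy and the user identity are both
// persisted with a matching expiry - e.g. a credential expiring one hour from
// now would be stored with a ttl of roughly 3600 seconds.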
// DeleteUsers - given a set of users or access keys, deletes them along with
// any derived credentials (STS or service accounts) and any associated policy
// mappings.
func (store *IAMStoreSys) DeleteUsers(ctx context.Context, users []string) error {
cache := store.lock()
defer store.unlock()
var deleted bool
usersToDelete := set.CreateStringSet(users...)
for user, ui := range cache.iamUsersMap {
userType := regUser
cred := ui.Credentials
if cred.IsServiceAccount() {
userType = svcUser
} else if cred.IsTemp() {
userType = stsUser
}
if usersToDelete.Contains(user) || usersToDelete.Contains(cred.ParentUser) {
// Delete this user account and its policy mapping
store.deleteMappedPolicy(ctx, user, userType, false)
delete(cache.iamUserPolicyMap, user)
// we are only logging errors, not handling them.
err := store.deleteUserIdentity(ctx, user, userType)
logger.LogIf(GlobalContext, err)
delete(cache.iamUsersMap, user)
deleted = true
}
}
if deleted {
cache.updatedAt = time.Now()
}
return nil
}
// ParentUserInfo contains extra info about the parent user.
type ParentUserInfo struct {
subClaimValue string
roleArns set.StringSet
}
// GetAllParentUsers - returns all distinct "parent-users" associated with STS
// or service credentials, mapped to all distinct roleARNs associated with the
// parent user. The dummy role ARN is associated with parent users from
// policy-claim based OpenID providers.
func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo {
cache := store.rlock()
defer store.runlock()
res := map[string]ParentUserInfo{}
for _, ui := range cache.iamUsersMap {
cred := ui.Credentials
// Only consider service account or STS credentials with
// non-empty session tokens.
if !(cred.IsServiceAccount() || cred.IsTemp()) ||
cred.SessionToken == "" {
continue
}
var (
err error
claims map[string]interface{} = cred.Claims
)
if cred.IsServiceAccount() {
claims, err = getClaimsFromTokenWithSecret(cred.SessionToken, cred.SecretKey)
} else if cred.IsTemp() {
claims, err = getClaimsFromTokenWithSecret(cred.SessionToken, globalActiveCred.SecretKey)
}
if err != nil {
continue
}
if cred.ParentUser == "" {
continue
}
subClaimValue := cred.ParentUser
if v, ok := claims[subClaim]; ok {
subFromToken, ok := v.(string)
if ok {
subClaimValue = subFromToken
}
}
roleArn := openid.DummyRoleARN.String()
s, ok := claims[roleArnClaim]
val, ok2 := s.(string)
if ok && ok2 {
roleArn = val
}
v, ok := res[cred.ParentUser]
if ok {
res[cred.ParentUser] = ParentUserInfo{
subClaimValue: subClaimValue,
roleArns: v.roleArns.Union(set.CreateStringSet(roleArn)),
}
} else {
res[cred.ParentUser] = ParentUserInfo{
subClaimValue: subClaimValue,
roleArns: set.CreateStringSet(roleArn),
}
}
}
return res
}
// Assumes store is locked by caller. If users is empty, returns all user mappings.
func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string,
userPredicate func(string) bool,
) []madmin.UserPolicyEntities {
var r []madmin.UserPolicyEntities
usersSet := set.CreateStringSet(users...)
for user, mappedPolicy := range cache.iamUserPolicyMap {
if userPredicate != nil && !userPredicate(user) {
continue
}
if !usersSet.IsEmpty() && !usersSet.Contains(user) {
continue
}
ps := mappedPolicy.toSlice()
sort.Strings(ps)
r = append(r, madmin.UserPolicyEntities{
User: user,
Policies: ps,
})
}
sort.Slice(r, func(i, j int) bool {
return r[i].User < r[j].User
})
return r
}
// Assumes store is locked by caller. If groups is empty, returns all group mappings.
func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []string,
groupPredicate func(string) bool,
) []madmin.GroupPolicyEntities {
var r []madmin.GroupPolicyEntities
groupsSet := set.CreateStringSet(groups...)
for group, mappedPolicy := range cache.iamGroupPolicyMap {
if groupPredicate != nil && !groupPredicate(group) {
continue
}
if !groupsSet.IsEmpty() && !groupsSet.Contains(group) {
continue
}
ps := mappedPolicy.toSlice()
sort.Strings(ps)
r = append(r, madmin.GroupPolicyEntities{
Group: group,
Policies: ps,
})
}
sort.Slice(r, func(i, j int) bool {
return r[i].Group < r[j].Group
})
return r
}
// Assumes store is locked by caller. If policies is empty, returns all policy mappings.
func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
userPredicate, groupPredicate func(string) bool,
) []madmin.PolicyEntities {
queryPolSet := set.CreateStringSet(policies...)
policyToUsersMap := make(map[string]set.StringSet)
for user, mappedPolicy := range cache.iamUserPolicyMap {
if userPredicate != nil && !userPredicate(user) {
continue
}
commonPolicySet := mappedPolicy.policySet()
if !queryPolSet.IsEmpty() {
commonPolicySet = commonPolicySet.Intersection(queryPolSet)
}
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToUsersMap[policy]
if !ok {
policyToUsersMap[policy] = set.CreateStringSet(user)
} else {
s.Add(user)
policyToUsersMap[policy] = s
}
}
}
policyToGroupsMap := make(map[string]set.StringSet)
for group, mappedPolicy := range cache.iamGroupPolicyMap {
if groupPredicate != nil && !groupPredicate(group) {
continue
}
commonPolicySet := mappedPolicy.policySet()
if !queryPolSet.IsEmpty() {
commonPolicySet = commonPolicySet.Intersection(queryPolSet)
}
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToGroupsMap[policy]
if !ok {
policyToGroupsMap[policy] = set.CreateStringSet(group)
} else {
s.Add(group)
policyToGroupsMap[policy] = s
}
}
}
m := make(map[string]madmin.PolicyEntities, len(policyToGroupsMap))
for policy, groups := range policyToGroupsMap {
s := groups.ToSlice()
sort.Strings(s)
m[policy] = madmin.PolicyEntities{
Policy: policy,
Groups: s,
}
}
for policy, users := range policyToUsersMap {
s := users.ToSlice()
sort.Strings(s)
// Update existing value in map
pe := m[policy]
pe.Policy = policy
pe.Users = s
m[policy] = pe
}
policyEntities := make([]madmin.PolicyEntities, 0, len(m))
for _, v := range m {
policyEntities = append(policyEntities, v)
}
sort.Slice(policyEntities, func(i, j int) bool {
return policyEntities[i].Policy < policyEntities[j].Policy
})
return policyEntities
}
// ListPolicyMappings - return users/groups mapped to policies.
func (store *IAMStoreSys) ListPolicyMappings(q madmin.PolicyEntitiesQuery,
userPredicate, groupPredicate func(string) bool,
) madmin.PolicyEntitiesResult {
cache := store.rlock()
defer store.runlock()
var result madmin.PolicyEntitiesResult
isAllPoliciesQuery := len(q.Users) == 0 && len(q.Groups) == 0 && len(q.Policy) == 0
if len(q.Users) > 0 {
result.UserMappings = store.listUserPolicyMappings(cache, q.Users, userPredicate)
}
if len(q.Groups) > 0 {
result.GroupMappings = store.listGroupPolicyMappings(cache, q.Groups, groupPredicate)
}
if len(q.Policy) > 0 || isAllPoliciesQuery {
result.PolicyMappings = store.listPolicyMappings(cache, q.Policy, userPredicate, groupPredicate)
}
return result
}
// SetUserStatus - sets current user status.
func (store *IAMStoreSys) SetUserStatus(ctx context.Context, accessKey string, status madmin.AccountStatus) (updatedAt time.Time, err error) {
if accessKey != "" && status != madmin.AccountEnabled && status != madmin.AccountDisabled {
return updatedAt, errInvalidArgument
}
cache := store.lock()
defer store.unlock()
ui, ok := cache.iamUsersMap[accessKey]
if !ok {
return updatedAt, errNoSuchUser
}
cred := ui.Credentials
if cred.IsTemp() || cred.IsServiceAccount() {
return updatedAt, errIAMActionNotAllowed
}
uinfo := newUserIdentity(auth.Credentials{
AccessKey: accessKey,
SecretKey: cred.SecretKey,
Status: func() string {
switch string(status) {
case string(madmin.AccountEnabled), string(auth.AccountOn):
return auth.AccountOn
}
return auth.AccountOff
}(),
})
if err := store.saveUserIdentity(ctx, accessKey, regUser, uinfo); err != nil {
return updatedAt, err
}
if err := cache.updateUserWithClaims(accessKey, uinfo); err != nil {
return updatedAt, err
}
return uinfo.UpdatedAt, nil
}
// AddServiceAccount - add a new service account
func (store *IAMStoreSys) AddServiceAccount(ctx context.Context, cred auth.Credentials) (updatedAt time.Time, err error) {
cache := store.lock()
defer store.unlock()
accessKey := cred.AccessKey
parentUser := cred.ParentUser
// If the newly requested service account already exists as an account,
// reject the operation (updates to the service account are handled in
// a different API).
if su, found := cache.iamUsersMap[accessKey]; found {
scred := su.Credentials
if scred.ParentUser != parentUser {
return updatedAt, fmt.Errorf("%w: the service account access key is taken by another user", errIAMServiceAccountNotAllowed)
}
return updatedAt, fmt.Errorf("%w: the service account access key is already taken", errIAMServiceAccountNotAllowed)
}
// Parent user must not be a service account.
if u, found := cache.iamUsersMap[parentUser]; found && u.Credentials.IsServiceAccount() {
return updatedAt, fmt.Errorf("%w: unable to create a service account for another service account", errIAMServiceAccountNotAllowed)
}
u := newUserIdentity(cred)
err = store.saveUserIdentity(ctx, u.Credentials.AccessKey, svcUser, u)
if err != nil {
return updatedAt, err
}
cache.updateUserWithClaims(u.Credentials.AccessKey, u)
return u.UpdatedAt, nil
}
// UpdateServiceAccount - updates a service account on storage.
func (store *IAMStoreSys) UpdateServiceAccount(ctx context.Context, accessKey string, opts updateServiceAccountOpts) (updatedAt time.Time, err error) {
cache := store.lock()
defer store.unlock()
ui, ok := cache.iamUsersMap[accessKey]
if !ok || !ui.Credentials.IsServiceAccount() {
return updatedAt, errNoSuchServiceAccount
}
cr := ui.Credentials
currentSecretKey := cr.SecretKey
if opts.secretKey != "" {
if !auth.IsSecretKeyValid(opts.secretKey) {
return updatedAt, auth.ErrInvalidSecretKeyLength
}
cr.SecretKey = opts.secretKey
}
if opts.name != "" {
cr.Name = opts.name
}
if opts.description != "" {
cr.Description = opts.description
}
if opts.expiration != nil {
expirationInUTC := opts.expiration.UTC()
if err := validateSvcExpirationInUTC(expirationInUTC); err != nil {
return updatedAt, err
}
cr.Expiration = expirationInUTC
}
switch opts.status {
// The caller did not ask to update the account status, do nothing
case "":
case string(madmin.AccountEnabled):
cr.Status = auth.AccountOn
case string(madmin.AccountDisabled):
cr.Status = auth.AccountOff
// Update account status
case auth.AccountOn, auth.AccountOff:
cr.Status = opts.status
default:
return updatedAt, errors.New("unknown account status value")
}
m, err := getClaimsFromTokenWithSecret(cr.SessionToken, currentSecretKey)
if err != nil {
return updatedAt, fmt.Errorf("unable to get svc acc claims: %v", err)
}
// The extracted session policy name string can be removed as it's not
// useful at this point.
delete(m, sessionPolicyNameExtracted)
// If sessionPolicy is nil and there is an embedded policy attached, we
// remove the embedded policy at that point.
if _, ok := m[iampolicy.SessionPolicyName]; ok && opts.sessionPolicy == nil {
delete(m, iampolicy.SessionPolicyName)
m[iamPolicyClaimNameSA()] = inheritedPolicyType
}
if opts.sessionPolicy != nil { // session policy is being updated
if err := opts.sessionPolicy.Validate(); err != nil {
return updatedAt, err
}
policyBuf, err := json.Marshal(opts.sessionPolicy)
if err != nil {
return updatedAt, err
}
if len(policyBuf) > 2048 {
return updatedAt, errSessionPolicyTooLarge
}
// Overwrite session policy claims.
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf)
m[iamPolicyClaimNameSA()] = embeddedPolicyType
}
cr.SessionToken, err = auth.JWTSignWithAccessKey(accessKey, m, cr.SecretKey)
if err != nil {
return updatedAt, err
}
u := newUserIdentity(cr)
if err := store.saveUserIdentity(ctx, u.Credentials.AccessKey, svcUser, u); err != nil {
return updatedAt, err
}
if err := cache.updateUserWithClaims(u.Credentials.AccessKey, u); err != nil {
return updatedAt, err
}
return u.UpdatedAt, nil
}
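// A brief sketch of the session-token handling above: the existing claims
// are recovered via getClaimsFromTokenWithSecret using the current secret
// key, the session-policy claims are adjusted (embedded vs. inherited), and
// the token is re-signed with auth.JWTSignWithAccessKey using the possibly
// updated secret key so the stored credential remains self-consistent.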
// ListTempAccounts - lists only temporary accounts from the cache.
func (store *IAMStoreSys) ListTempAccounts(ctx context.Context, accessKey string) ([]UserIdentity, error) {
cache := store.rlock()
defer store.runlock()
userExists := false
var tempAccounts []UserIdentity
for _, v := range cache.iamUsersMap {
isDerived := false
if v.Credentials.IsServiceAccount() || v.Credentials.IsTemp() {
isDerived = true
}
if !isDerived && v.Credentials.AccessKey == accessKey {
userExists = true
} else if isDerived && v.Credentials.ParentUser == accessKey {
userExists = true
if v.Credentials.IsTemp() {
// Hide secret key & session token here
v.Credentials.SecretKey = ""
v.Credentials.SessionToken = ""
tempAccounts = append(tempAccounts, v)
}
}
}
if !userExists {
return nil, errNoSuchUser
}
return tempAccounts, nil
}
// ListServiceAccounts - lists only service accounts from the cache.
func (store *IAMStoreSys) ListServiceAccounts(ctx context.Context, accessKey string) ([]auth.Credentials, error) {
cache := store.rlock()
defer store.runlock()
userExists := false
var serviceAccounts []auth.Credentials
for _, u := range cache.iamUsersMap {
isDerived := false
v := u.Credentials
if v.IsServiceAccount() || v.IsTemp() {
isDerived = true
}
if !isDerived && v.AccessKey == accessKey {
userExists = true
} else if isDerived && v.ParentUser == accessKey {
userExists = true
if v.IsServiceAccount() {
// Hide secret key & session token here
v.SecretKey = ""
v.SessionToken = ""
serviceAccounts = append(serviceAccounts, v)
}
}
}
if !userExists {
return nil, errNoSuchUser
}
return serviceAccounts, nil
}
// AddUser - adds/updates long term user account to storage.
func (store *IAMStoreSys) AddUser(ctx context.Context, accessKey string, ureq madmin.AddOrUpdateUserReq) (updatedAt time.Time, err error) {
cache := store.lock()
defer store.unlock()
cache.updatedAt = time.Now()
ui, ok := cache.iamUsersMap[accessKey]
// It is not possible to update an STS account.
if ok && ui.Credentials.IsTemp() {
return updatedAt, errIAMActionNotAllowed
}
u := newUserIdentity(auth.Credentials{
AccessKey: accessKey,
SecretKey: ureq.SecretKey,
Status: func() string {
switch string(ureq.Status) {
case string(madmin.AccountEnabled), string(auth.AccountOn):
return auth.AccountOn
}
return auth.AccountOff
}(),
})
if err := store.saveUserIdentity(ctx, accessKey, regUser, u); err != nil {
return updatedAt, err
}
if err := cache.updateUserWithClaims(accessKey, u); err != nil {
return updatedAt, err
}
return u.UpdatedAt, nil
}
// UpdateUserSecretKey - sets user secret key to storage.
func (store *IAMStoreSys) UpdateUserSecretKey(ctx context.Context, accessKey, secretKey string) error {
cache := store.lock()
defer store.unlock()
cache.updatedAt = time.Now()
ui, ok := cache.iamUsersMap[accessKey]
if !ok {
return errNoSuchUser
}
cred := ui.Credentials
cred.SecretKey = secretKey
u := newUserIdentity(cred)
if err := store.saveUserIdentity(ctx, accessKey, regUser, u); err != nil {
return err
}
return cache.updateUserWithClaims(accessKey, u)
}
// GetSTSAndServiceAccounts - returns all STS and Service account credentials.
func (store *IAMStoreSys) GetSTSAndServiceAccounts() []auth.Credentials {
cache := store.rlock()
defer store.runlock()
var res []auth.Credentials
for _, u := range cache.iamUsersMap {
cred := u.Credentials
if cred.IsTemp() || cred.IsServiceAccount() {
res = append(res, cred)
}
}
return res
}
// UpdateUserIdentity - updates a user credential.
func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Credentials) error {
cache := store.lock()
defer store.unlock()
cache.updatedAt = time.Now()
userType := regUser
if cred.IsServiceAccount() {
userType = svcUser
} else if cred.IsTemp() {
userType = stsUser
}
ui := newUserIdentity(cred)
// Overwrite the user identity here. As the store should be
// atomic, it shouldn't cause any corruption.
if err := store.saveUserIdentity(ctx, cred.AccessKey, userType, ui); err != nil {
return err
}
return cache.updateUserWithClaims(cred.AccessKey, ui)
}
// LoadUser - attempts to load user info from storage and updates cache.
func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) {
cache := store.lock()
defer store.unlock()
cache.updatedAt = time.Now()
_, found := cache.iamUsersMap[accessKey]
if !found {
store.loadUser(ctx, accessKey, regUser, cache.iamUsersMap)
if _, found = cache.iamUsersMap[accessKey]; found {
// load mapped policies
store.loadMappedPolicy(ctx, accessKey, regUser, false, cache.iamUserPolicyMap)
} else {
// check for service account
store.loadUser(ctx, accessKey, svcUser, cache.iamUsersMap)
if svc, found := cache.iamUsersMap[accessKey]; found {
// Load parent user and mapped policies.
if store.getUsersSysType() == MinIOUsersSysType {
store.loadUser(ctx, svc.Credentials.ParentUser, regUser, cache.iamUsersMap)
}
store.loadMappedPolicy(ctx, svc.Credentials.ParentUser, regUser, false, cache.iamUserPolicyMap)
} else {
// check for STS account
store.loadUser(ctx, accessKey, stsUser, cache.iamUsersMap)
if _, found = cache.iamUsersMap[accessKey]; found {
// Load mapped policy
store.loadMappedPolicy(ctx, accessKey, stsUser, false, cache.iamUserPolicyMap)
}
}
}
}
// Load any associated policy definitions
for _, policy := range cache.iamUserPolicyMap[accessKey].toSlice() {
if _, found = cache.iamPolicyDocsMap[policy]; !found {
store.loadPolicyDoc(ctx, policy, cache.iamPolicyDocsMap)
}
}
}
func extractJWTClaims(u UserIdentity) (*jwt.MapClaims, error) {
jwtClaims, err := auth.ExtractClaims(u.Credentials.SessionToken, u.Credentials.SecretKey)
if err != nil {
// Session tokens for STS creds will be generated with root secret
jwtClaims, err = auth.ExtractClaims(u.Credentials.SessionToken, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
}
return jwtClaims, nil
}
func validateSvcExpirationInUTC(expirationInUTC time.Time) error {
if expirationInUTC.IsZero() || expirationInUTC.Equal(timeSentinel) {
// Service accounts might not have expiration in older releases.
return nil
}
currentTime := time.Now().UTC()
minExpiration := currentTime.Add(minServiceAccountExpiry)
maxExpiration := currentTime.Add(maxServiceAccountExpiry)
if expirationInUTC.Before(minExpiration) || expirationInUTC.After(maxExpiration) {
return errInvalidSvcAcctExpiration
}
return nil
}
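// Illustrative example (the concrete durations are assumptions, not taken
// from this file): with minServiceAccountExpiry of 15 minutes and
// maxServiceAccountExpiry of 365 days, an expiration 5 minutes from now or
// 10 years from now would be rejected with errInvalidSvcAcctExpiration,
// while a zero or sentinel expiration is accepted for backward compatibility.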
<file_sep>//go:build linux && !s390x && !arm && !386
// +build linux,!s390x,!arm,!386
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package disk
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"syscall"
"golang.org/x/sys/unix"
)
// GetInfo returns total and free bytes available in a directory, e.g. `/`.
func GetInfo(path string) (info Info, err error) {
s := syscall.Statfs_t{}
err = syscall.Statfs(path, &s)
if err != nil {
return Info{}, err
}
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
//nolint:unconvert
FSType: getFSType(int64(s.Type)),
}
// Check for overflows.
// https://github.com/minio/minio/issues/8035
// XFS can show wrong values at times; error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
st := syscall.Stat_t{}
err = syscall.Stat(path, &st)
if err != nil {
return Info{}, err
}
//nolint:unconvert
devID := uint64(st.Dev) // Needed to support multiple GOARCHs
info.Major = unix.Major(devID)
info.Minor = unix.Minor(devID)
return info, nil
}
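// A small worked example of the arithmetic above (numbers are made up):
// with Frsize=4096, Blocks=1000000, Bfree=400000 and Bavail=390000, the
// reserved blocks are 10000, so Total = 4096*(1000000-10000) and
// Free = 4096*390000; Used is then Total-Free. The overflow check guards
// against filesystems occasionally reporting Free > Total.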
const (
statsPath = "/proc/diskstats"
)
// GetAllDrivesIOStats returns IO stats of all drives found in the machine
func GetAllDrivesIOStats() (info AllDrivesIOStats, err error) {
proc, err := os.Open(statsPath)
if err != nil {
return nil, err
}
defer proc.Close()
ret := make(AllDrivesIOStats)
sc := bufio.NewScanner(proc)
for sc.Scan() {
line := sc.Text()
fields := strings.Fields(line)
if len(fields) < 11 {
continue
}
var err error
var ds IOStats
ds.ReadIOs, err = strconv.ParseUint((fields[3]), 10, 64)
if err != nil {
return ret, err
}
ds.ReadMerges, err = strconv.ParseUint((fields[4]), 10, 64)
if err != nil {
return ret, err
}
ds.ReadSectors, err = strconv.ParseUint((fields[5]), 10, 64)
if err != nil {
return ret, err
}
ds.ReadTicks, err = strconv.ParseUint((fields[6]), 10, 64)
if err != nil {
return ret, err
}
ds.WriteIOs, err = strconv.ParseUint((fields[7]), 10, 64)
if err != nil {
return ret, err
}
ds.WriteMerges, err = strconv.ParseUint((fields[8]), 10, 64)
if err != nil {
return ret, err
}
ds.WriteSectors, err = strconv.ParseUint((fields[9]), 10, 64)
if err != nil {
return ret, err
}
ds.WriteTicks, err = strconv.ParseUint((fields[10]), 10, 64)
if err != nil {
return ret, err
}
if len(fields) > 11 {
ds.CurrentIOs, err = strconv.ParseUint((fields[11]), 10, 64)
if err != nil {
return ret, err
}
ds.TotalTicks, err = strconv.ParseUint((fields[12]), 10, 64)
if err != nil {
return ret, err
}
ds.ReqTicks, err = strconv.ParseUint((fields[13]), 10, 64)
if err != nil {
return ret, err
}
}
if len(fields) > 14 {
ds.DiscardIOs, err = strconv.ParseUint((fields[14]), 10, 64)
if err != nil {
return ret, err
}
ds.DiscardMerges, err = strconv.ParseUint((fields[15]), 10, 64)
if err != nil {
return ret, err
}
ds.DiscardSectors, err = strconv.ParseUint((fields[16]), 10, 64)
if err != nil {
return ret, err
}
ds.DiscardTicks, err = strconv.ParseUint((fields[17]), 10, 64)
if err != nil {
return ret, err
}
}
if len(fields) > 18 {
ds.FlushIOs, err = strconv.ParseUint((fields[18]), 10, 64)
if err != nil {
return ret, err
}
ds.FlushTicks, err = strconv.ParseUint((fields[19]), 10, 64)
if err != nil {
return ret, err
}
}
major, err := strconv.ParseUint((fields[0]), 10, 32)
if err != nil {
return ret, err
}
minor, err := strconv.ParseUint((fields[1]), 10, 32)
if err != nil {
return ret, err
}
ret[DevID{uint32(major), uint32(minor)}] = ds
}
if err := sc.Err(); err != nil {
return nil, err
}
return ret, nil
}
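// For reference, a truncated, illustrative /proc/diskstats line parsed by
// the loop above looks like:
//
//   8 0 sda 120 30 4000 90 250 60 9000 180 0 400 270
//
// fields[0] and fields[1] are the major/minor device numbers, fields[3..10]
// carry the read/write counters, and the optional trailing fields carry
// in-flight, discard and flush statistics when the kernel exposes them.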
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"log"
)
func bytesToPrivateKey(priv []byte) (*rsa.PrivateKey, error) {
// Try PEM
if block, _ := pem.Decode(priv); block != nil {
return x509.ParsePKCS1PrivateKey(block.Bytes)
}
// Try base 64
dst := make([]byte, base64.StdEncoding.DecodedLen(len(priv)))
if n, err := base64.StdEncoding.Decode(dst, priv); err == nil {
return x509.ParsePKCS1PrivateKey(dst[:n])
}
// Try Raw, return error
return x509.ParsePKCS1PrivateKey(priv)
}
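// Sketch of the decoding order above: a PEM-encoded PKCS#1 key is tried
// first, then the input is treated as base64-encoded PKCS#1 DER, and
// finally the raw bytes are parsed as PKCS#1 DER directly, with that last
// parse error surfacing to the caller on failure.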
func fatalErr(err error) {
if err == nil {
return
}
log.Fatalln(err)
}
func fatalIf(b bool, msg string, v ...interface{}) {
if !b {
return
}
log.Fatalf(msg, v...)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package pubsub
import (
"fmt"
"sync"
"sync/atomic"
)
// Sub - subscriber entity.
type Sub[T Maskable] struct {
ch chan T
types Mask
filter func(entry T) bool
}
// PubSub holds publishers and subscribers
type PubSub[T Maskable, M Maskable] struct {
// atomics, keep at top:
types uint64
numSubscribers int32
maxSubscribers int32
// not atomics:
subs []*Sub[T]
sync.RWMutex
}
// Publish message to the subscribers.
// Note that Publish is always a non-blocking send so that we don't block on slow receivers.
// Hence receivers should use a buffered channel so as not to miss the published events.
func (ps *PubSub[T, M]) Publish(item T) {
ps.RLock()
defer ps.RUnlock()
for _, sub := range ps.subs {
if sub.types.Contains(Mask(item.Mask())) && (sub.filter == nil || sub.filter(item)) {
select {
case sub.ch <- item:
default:
}
}
}
}
// Subscribe - Adds a subscriber to pubsub system
func (ps *PubSub[T, M]) Subscribe(mask M, subCh chan T, doneCh <-chan struct{}, filter func(entry T) bool) error {
totalSubs := atomic.AddInt32(&ps.numSubscribers, 1)
if ps.maxSubscribers > 0 && totalSubs > ps.maxSubscribers {
atomic.AddInt32(&ps.numSubscribers, -1)
return fmt.Errorf("the limit of `%d` subscribers is reached", ps.maxSubscribers)
}
ps.Lock()
defer ps.Unlock()
sub := &Sub[T]{ch: subCh, types: Mask(mask.Mask()), filter: filter}
ps.subs = append(ps.subs, sub)
// We hold a lock, so we are safe to update
combined := Mask(atomic.LoadUint64(&ps.types))
combined.Merge(Mask(mask.Mask()))
atomic.StoreUint64(&ps.types, uint64(combined))
go func() {
<-doneCh
ps.Lock()
defer ps.Unlock()
var remainTypes Mask
for i, s := range ps.subs {
if s == sub {
ps.subs = append(ps.subs[:i], ps.subs[i+1:]...)
} else {
remainTypes.Merge(s.types)
}
}
atomic.StoreUint64(&ps.types, uint64(remainTypes))
atomic.AddInt32(&ps.numSubscribers, -1)
}()
return nil
}
// NumSubscribers returns the number of current subscribers,
// The mask is checked against the active subscribed types,
// and 0 will be returned if nobody is subscribed for the type(s).
func (ps *PubSub[T, M]) NumSubscribers(mask M) int32 {
types := Mask(atomic.LoadUint64(&ps.types))
if !types.Overlaps(Mask(mask.Mask())) {
return 0
}
return atomic.LoadInt32(&ps.numSubscribers)
}
// Subscribers returns the number of current subscribers for all types.
func (ps *PubSub[T, M]) Subscribers() int32 {
return atomic.LoadInt32(&ps.numSubscribers)
}
// New initializes a PubSub system with a limit on the maximum number of
// subscribers; specifying zero means no limit.
func New[T Maskable, M Maskable](maxSubscribers int32) *PubSub[T, M] {
return &PubSub[T, M]{maxSubscribers: maxSubscribers}
}
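// A minimal usage sketch (the event and mask values below are hypothetical
// stand-ins for concrete Maskable implementations):
//
//   ps := New[event, eventMask](100)       // at most 100 subscribers
//   ch := make(chan event, 1000)           // buffered, per the note on Publish
//   done := make(chan struct{})
//   _ = ps.Subscribe(mask, ch, done, nil)  // nil filter receives everything
//   ps.Publish(ev)                         // non-blocking fan-out
//   close(done)                            // unsubscribes asynchronously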
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bufio"
"context"
"encoding/xml"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/amztime"
sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/sio"
)
// Multipart objectAPIHandlers
// NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption-related jobs;
// the handler should ensure these keys are removed before the request is passed to the object layer.
// Currently these keys are:
// - X-Amz-Server-Side-Encryption-Customer-Key
// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "NewMultipartUpload")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if bucket encryption is enabled
sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
})
// Validate storage class metadata if present
if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
if !storageclass.IsValid(sc) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
return
}
}
encMetadata := map[string]string{}
if crypto.Requested(r.Header) {
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
if crypto.SSEC.IsRequested(r.Header) && crypto.S3.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
return
}
if crypto.SSEC.IsRequested(r.Header) && crypto.S3KMS.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
return
}
if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
return
}
if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Set this for multipart-only operations; we need to be able to tell during
// decryption whether the file was actually multipart or not.
encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
}
// Extract metadata that needs to be saved.
metadata, err := extractMetadata(ctx, r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
metadata[xhttp.AmzObjectTagging] = objTags
}
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
if s3Err == ErrNone && retentionMode.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = amztime.ISO8601Format(retentionDate.UTC())
}
if s3Err == ErrNone && legalHold.Status.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
}
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType, ObjectOptions{})); dsc.ReplicateAny() {
metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
// We need to preserve the encryption headers set in EncryptRequest,
// so we do not override them; copy them instead.
for k, v := range encMetadata {
metadata[k] = v
}
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)
if isCompressible(r.Header, object) {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
}
opts, err := putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !opts.MTime.IsZero() && opts.PreserveETag != "" {
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
return checkPreconditionsPUT(ctx, w, r, oi, opts)
}
}
checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo))
if checksumType.Is(hash.ChecksumInvalid) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL)
return
} else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) {
opts.WantChecksum = &hash.Checksum{Type: checksumType}
}
newMultipartUpload := objectAPI.NewMultipartUpload
if api.CacheAPI() != nil {
newMultipartUpload = api.CacheAPI().NewMultipartUpload
}
res, err := newMultipartUpload(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID)
if res.ChecksumAlgo != "" {
w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo)
}
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CopyObjectPart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Read escaped copy source path to check for parameters.
cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
var vid string
if u, err := url.Parse(cpSrcPath); err == nil {
vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
// Note that url.Parse does the unescaping
cpSrcPath = u.Path
}
srcBucket, srcObject := path2BucketObject(cpSrcPath)
// If source object is empty or bucket is empty, reply back invalid copy source.
if srcObject == "" || srcBucket == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
Bucket: srcBucket,
Object: srcObject,
VersionID: vid,
}), r.URL)
return
}
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
uploadID := r.Form.Get(xhttp.UploadID)
partIDString := r.Form.Get(xhttp.PartNumber)
partID, err := strconv.Atoi(partIDString)
if err != nil || partID <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
return
}
// check partID with maximum part ID for multipart objects
if isMaxPartID(partID) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
return
}
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
srcOpts.VersionID = vid
// convert copy src and dst encryption options for GET/PUT calls
getOpts := ObjectOptions{VersionID: srcOpts.VersionID}
if srcOpts.ServerSideEncryption != nil {
getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
}
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
// Get request range.
var rs *HTTPRangeSpec
var parseRangeErr error
if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
} else {
// This check is to see if client specified a header but the value
// is empty for 'x-amz-copy-source-range'
_, ok := r.Header[xhttp.AmzCopySourceRange]
if ok {
parseRangeErr = errInvalidRange
}
}
checkCopyPartPrecondFn := func(o ObjectInfo) bool {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if checkCopyObjectPartPreconditions(ctx, w, r, o) {
return true
}
if parseRangeErr != nil {
logger.LogIf(ctx, parseRangeErr)
writeCopyPartErr(ctx, w, parseRangeErr, r.URL)
// A Range header mismatch is a precondition-like failure,
// so return true to indicate the Range precondition failed.
return true
}
return false
}
getOpts.CheckPrecondFn = checkCopyPartPrecondFn
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, getOpts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
if globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) && gr != nil {
// Versioning is enabled, so the object may have been deleted and replaced by a delete-marker.
// If present, set the headers to match AWS S3 behavior (why S3 sets them is unclear).
if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
}
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer gr.Close()
srcInfo := gr.ObjInfo
actualPartSize, err := srcInfo.GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := enforceBucketQuotaHard(ctx, dstBucket, actualPartSize); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Special care for CopyObjectPart
if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
writeCopyPartErr(ctx, w, partRangeErr, r.URL)
return
}
// Get the object offset & length
startOffset, length, err := rs.GetOffsetLength(actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// maximum copy size for multipart objects in a single operation
if isMaxObjectSize(length) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
var dstRecords []dns.SrvRecord
dstRecords, err = globalDNSConfig.Get(dstBucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Send PutObject request to appropriate instance (in federated deployment)
core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
if rerr != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
return
}
popts := minio.PutObjectPartOptions{
SSE: dstOpts.ServerSideEncryption,
}
partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, popts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
return
}
actualPartSize = length
var reader io.Reader = etag.NewReader(gr, nil)
mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Read compression metadata preserved in the init multipart for the decision.
_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
// Compress only if the compression is enabled during initial multipart.
var idxCb func() []byte
if isCompressed {
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(reader, actualPartSize, wantEncryption)
idxCb = cb
defer s2c.Close()
reader = etag.Wrap(s2c, reader)
length = -1
}
srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dstOpts.IndexCB = idxCb
rawReader := srcInfo.Reader
pReader := NewPutObjReader(rawReader)
_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
var objectEncryptionKey crypto.ObjectKey
if isEncrypted {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
}
if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
}
var key []byte
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
key, err = decryptObjectMeta(key, dstBucket, dstObject, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
copy(objectEncryptionKey[:], key)
partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
reader = etag.Wrap(encReader, reader)
wantSize := int64(-1)
if length >= 0 {
info := ObjectInfo{Size: length}
wantSize = info.EncryptedSize()
}
srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dstOpts.IndexCB != nil {
dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
}
}
srcInfo.PutObjReader = pReader
copyObjectPart := objectAPI.CopyObjectPart
if api.CacheAPI() != nil {
copyObjectPart = api.CacheAPI().CopyObjectPart
}
// Copy the source object to the destination; if the source and destination
// objects are the same, only the metadata is updated.
partInfo, err := copyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
startOffset, length, srcInfo, srcOpts, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if isEncrypted {
partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.S3.IsRequested(r.Header))
}
response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectPart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// X-Amz-Copy-Source shouldn't be set for this call.
if _, ok := r.Header[xhttp.AmzCopySource]; ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
return
}
// If Content-Length is unknown/missing, the request is rejected further below.
size := r.ContentLength
rAuthType := getRequestAuthType(r)
// For auth type streaming signature, we need to gather a different content length.
switch rAuthType {
// Check signature types that must have content length
case authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
if sizeStr[0] == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
size, err = strconv.ParseInt(sizeStr[0], 10, 64)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
}
if size == -1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
uploadID := r.Form.Get(xhttp.UploadID)
partIDString := r.Form.Get(xhttp.PartNumber)
partID, err := strconv.Atoi(partIDString)
if err != nil || partID <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
return
}
// maximum size for multipart objects in a single operation
if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
// check partID with maximum part ID for multipart objects
if isMaxPartID(partID) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
return
}
var (
md5hex = clientETag.String()
sha256hex = ""
reader io.Reader = r.Body
s3Error APIErrorCode
)
if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
switch rAuthType {
case authTypeStreamingSigned, authTypeStreamingSignedTrailer:
// Initialize stream signature verifier.
reader, s3Error = newSignV4ChunkedReader(r, rAuthType == authTypeStreamingSignedTrailer)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
case authTypeStreamingUnsignedTrailer:
// Initialize stream signature verifier.
reader, s3Error = newUnsignedV4ChunkedReader(r, true)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
case authTypeSignedV2, authTypePresignedV2:
if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
case authTypePresigned, authTypeSigned:
if s3Error = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if !skipContentSha256Cksum(r) {
sha256hex = getContentSha256Cksum(r, serviceS3)
}
}
if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
actualSize := size
// get encryption options
var opts ObjectOptions
if crypto.SSEC.IsRequested(r.Header) {
opts, err = getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Read compression metadata preserved in the init multipart for the decision.
_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
var idxCb func() []byte
if isCompressed {
actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = actualReader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
// Set compression metrics.
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
idxCb = cb
defer s2c.Close()
reader = etag.Wrap(s2c, actualReader)
size = -1 // Since compressed size is unpredictable.
md5hex = "" // Do not try to verify the content.
sha256hex = ""
}
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := hashReader.AddChecksum(r, size < 0); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
pReader := NewPutObjReader(hashReader)
_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
var objectEncryptionKey crypto.ObjectKey
if isEncrypted {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
}
opts, err = putOpts(ctx, r, bucket, object, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
var key []byte
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
// Calculating object encryption key
key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
copy(objectEncryptionKey[:], key)
partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
in := io.Reader(hashReader)
if size > encryptBufferThreshold {
// The encryption reads in blocks of 64KB.
// We add a buffer on bigger files to reduce the number of syscalls upstream.
in = bufio.NewReaderSize(hashReader, encryptBufferSize)
}
reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := hashReader.AddChecksum(r, true); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if idxCb != nil {
idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
}
opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
}
opts.IndexCB = idxCb
putObjectPart := objectAPI.PutObjectPart
if api.CacheAPI() != nil {
putObjectPart = api.CacheAPI().PutObjectPart
}
partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
if err != nil {
// Verify if the underlying error is signature mismatch.
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
etag := partInfo.ETag
if kind, encrypted := crypto.IsEncrypted(mi.UserDefined); encrypted {
switch kind {
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, mi.KMSKeyID())
if kmsCtx, ok := mi.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
etag = etag[len(etag)-32:]
}
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
etag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: etag})
case crypto.SSEC:
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
etag = etag[len(etag)-32:]
}
}
}
// We must not use the http.Header().Set method here because some (broken)
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
// Therefore, we have to set the ETag directly as map entry.
w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
hash.TransferChecksumHeader(w, r)
writeSuccessResponseHeadersOnly(w)
}
// CompleteMultipartUploadHandler - Complete multipart upload.
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CompleteMultipartUpload")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Get upload id.
uploadID, _, _, _, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Content-Length is required and should be non-zero
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
return
}
complMultipartUpload := &CompleteMultipartUpload{}
if err = xmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if len(complMultipartUpload.Parts) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
return
}
if !sort.SliceIsSorted(complMultipartUpload.Parts, func(i, j int) bool {
return complMultipartUpload.Parts[i].PartNumber < complMultipartUpload.Parts[j].PartNumber
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartOrder), r.URL)
return
}
// Reject retention or governance headers if set; the CompleteMultipartUpload spec
// does not use these headers, and they should not be passed down to checkPutObjectLockAllowed
if objectlock.IsObjectLockRequested(r.Header) || objectlock.IsObjectLockGovernanceBypassSet(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
completeMultiPartUpload := objectAPI.CompleteMultipartUpload
if api.CacheAPI() != nil {
completeMultiPartUpload = api.CacheAPI().CompleteMultipartUpload
}
// This code specifically handles the requirements for slow
// complete multipart upload operations in FS mode.
writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set the Retry-After header to tell user-agents to retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
}
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
encodedErrorResponse, _ := xml.Marshal(errorResponse)
setCommonHeaders(w)
w.Header().Set(xhttp.ContentType, string(mimeXML))
w.Write(encodedErrorResponse)
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
suspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
os := newObjSweeper(bucket, object).WithVersioning(versioned, suspended)
if !globalTierConfigMgr.Empty() {
// Get appropriate object info to identify the remote object to delete
goiOpts := os.GetOpts()
if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}
setEventStreamHeaders(w)
opts, err := completeMultipartOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// First, we compute the ETag of the multipart object.
// The ETag of a multi-part object is always:
// ETag := MD5(ETag_p1, ETag_p2, ...)+"-N" (N being the number of parts)
//
// This is independent of encryption. An encrypted multipart
// object also has an ETag that is the MD5 of its part ETags.
// The fact that, in the case of encryption, the ETag of a part is
// not the MD5 of the part content does not change that.
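// For example, a two-part upload whose part ETags are e1 and e2 ends up with
// ETag = hex(MD5(bin(e1) || bin(e2))) + "-2".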
var completeETags []etag.ETag
for _, part := range complMultipartUpload.Parts {
ETag, err := etag.Parse(part.ETag)
if err != nil {
continue
}
completeETags = append(completeETags, ETag)
}
multipartETag := etag.Multipart(completeETags...)
opts.UserDefined["etag"] = multipartETag.String()
w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
completeDoneCh := sendWhiteSpace(ctx, w)
objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, complMultipartUpload.Parts, opts)
// Stop writing white spaces to the client. Note that close(doneCh) style is not used as it
// can cause white space to be written after we send XML response in a race condition.
headerWritten := <-completeDoneCh
if err != nil {
if headerWritten {
writeErrorResponseWithoutXMLHeader(ctx, w, toAPIError(ctx, err), r.URL)
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
// Get object location.
location := getObjectLocation(r, globalDomainNames, bucket, object)
// Generate complete multipart response.
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo)
var encodedSuccessResponse []byte
if !headerWritten {
encodedSuccessResponse = encodeResponse(response)
} else {
encodedSuccessResponse, err = xml.Marshal(response)
if err != nil {
writeErrorResponseWithoutXMLHeader(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
opts.EncryptFn, err = objInfo.metadataEncryptFn(r.Header)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if r.Header.Get(xMinIOExtract) == "true" && HasSuffix(object, archiveExt) {
opts := ObjectOptions{VersionID: objInfo.VersionID, MTime: objInfo.ModTime}
if _, err := updateObjectMetadataWithZipInfo(ctx, objectAPI, bucket, object, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
setPutObjHeaders(w, objInfo, false)
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.ObjectReplicationType)
}
if _, ok := r.Header[xhttp.MinIOSourceReplicationRequest]; ok {
actualSize, _ := objInfo.GetActualSize()
defer globalReplicationStats.UpdateReplicaStat(bucket, actualSize)
}
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Notify object created event.
evt := eventArgs{
EventName: event.ObjectCreatedCompleteMultipartUpload,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
}
sendEvent(evt)
if objInfo.NumVersions > dataScannerExcessiveVersionsThreshold {
evt.EventName = event.ObjectManyVersions
sendEvent(evt)
}
// Remove the transitioned object whose object version is being overwritten.
if !globalTierConfigMgr.Empty() {
// Schedule object for immediate transition if eligible.
enqueueTransitionImmediate(objInfo, lcEventSrc_s3CompleteMultipartUpload)
logger.LogIf(ctx, os.Sweep())
}
}
// AbortMultipartUploadHandler - Abort multipart upload
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AbortMultipartUpload")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
abortMultipartUpload := objectAPI.AbortMultipartUpload
if api.CacheAPI() != nil {
abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
}
if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
uploadID, _, _, _, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
opts := ObjectOptions{}
if err := abortMultipartUpload(ctx, bucket, object, uploadID, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// ListObjectPartsHandler - List object parts
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectParts")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.Form)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if partNumberMarker < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumberMarker), r.URL)
return
}
if maxParts < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
return
}
opts := ObjectOptions{}
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// We have to adjust the size of encrypted parts since encrypted parts
// are slightly larger due to encryption overhead.
// Further, we have to adjust the ETags of parts when using SSE-S3.
// To match AWS S3 behavior, SSE-S3 encrypted parts return the plaintext ETag,
// i.e. the content MD5 of that particular part. This is not the case
// for SSE-C and SSE-KMS objects.
if kind, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok {
var objectEncryptionKey []byte
if kind == crypto.S3 {
objectEncryptionKey, err = decryptObjectMeta(nil, bucket, object, listPartsInfo.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
for i, p := range listPartsInfo.Parts {
listPartsInfo.Parts[i].ETag = tryDecryptETag(objectEncryptionKey, p.ETag, kind == crypto.S3)
listPartsInfo.Parts[i].Size = p.ActualSize
}
}
response := generateListPartsResponse(listPartsInfo, encodingType)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
type whiteSpaceWriter struct {
http.ResponseWriter
http.Flusher
written bool
}
func (w *whiteSpaceWriter) Write(b []byte) (n int, err error) {
n, err = w.ResponseWriter.Write(b)
w.written = true
return
}
func (w *whiteSpaceWriter) WriteHeader(statusCode int) {
if !w.written {
w.ResponseWriter.WriteHeader(statusCode)
}
}
// Send empty whitespaces every 10 seconds to the client till completeMultiPartUpload() is
// done so that the client does not time out. Downside is we might send 200 OK and
// then send error XML. But according to the S3 spec the client is supposed to check
// for error XML even if it received 200 OK. But for erasure this is not a problem
// as completeMultiPartUpload() is quick. Even for FS, it would not be an issue as
// we do background append as and when the parts arrive and completeMultiPartUpload
// is quick. Only in a rare case where parts would be out of order will
// FS:completeMultiPartUpload() take a longer time.
func sendWhiteSpace(ctx context.Context, w http.ResponseWriter) <-chan bool {
doneCh := make(chan bool)
go func() {
defer close(doneCh)
ticker := time.NewTicker(time.Second * 10)
defer ticker.Stop()
headerWritten := false
for {
select {
case <-ticker.C:
// Write header if not written yet.
if !headerWritten {
_, err := w.Write([]byte(xml.Header))
headerWritten = err == nil
}
// Once header is written keep writing empty spaces
// which are ignored by client SDK XML parsers.
// This occurs when server takes long time to completeMultiPartUpload()
_, err := w.Write([]byte(" "))
if err != nil {
return
}
w.(http.Flusher).Flush()
case doneCh <- headerWritten:
return
case <-ctx.Done():
return
}
}
}()
return doneCh
}
<file_sep># MinIO Quickstart Guide
[](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/) [](https://github.com/minio/minio/blob/master/LICENSE)
[](https://min.io)
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
## Container Installation
Use the following commands to run a standalone MinIO server as a container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
for more complete documentation.
### Stable
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
```sh
podman run -p 9000:9000 -p 9001:9001 \
quay.io/minio/minio server /data --console-address ":9001"
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
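For example, a minimal sketch of the earlier container command with a persistent host path mapped in (the host path `/mnt/data` is illustrative):

```sh
podman run -p 9000:9000 -p 9001:9001 \
  -v /mnt/data:/data \
  quay.io/minio/minio server /data --console-address ":9001"
```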
## macOS
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
### Homebrew (recommended)
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
brew install minio/stable/minio
minio server /data
```
> NOTE: If you previously installed minio using `brew install minio`, it is recommended that you reinstall minio from the official `minio/stable/minio` repo instead.
```sh
brew uninstall minio
brew install minio/stable/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
### Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
## GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
```sh
minio.exe server D:\
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.19](https://golang.org/dl/#stable)
```sh
go install github.com/minio/minio@latest
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
## Deployment Recommendations
### Allow port access for Firewalls
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
### ufw
For hosts with ufw enabled (Debian-based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000:
```sh
ufw allow 9000
```
The command below enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
ufw allow 9000:9010/tcp
```
### firewall-cmd
For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow traffic to specific ports. Use the commands below to allow access to port 9000:
```sh
firewall-cmd --get-active-zones
```
This command gets the active zone(s). Now, apply port rules to the relevant zones returned above. For example if the zone is `public`, use
```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
Note that `--permanent` makes sure the rules are persistent across firewall start, restart, or reload. Finally, reload the firewall for the changes to take effect.
```sh
firewall-cmd --reload
```
### iptables
For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to enable all traffic coming to specific ports. Use the command below to allow access to port 9000:
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
The command below enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Test MinIO Connectivity
### Test using MinIO Console
MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> NOTE: MinIO runs the Console on a random port by default. If you wish to choose a specific port, use `--console-address` to pick a specific interface and port.
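For example, to pin the Console to a specific port (the same form used in the container command earlier):

```sh
minio server /data --console-address ":9001"
```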
### Things to consider
MinIO redirects browser access requests made to the configured server port (e.g. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.
For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
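For that example deployment, the variable could be set as follows (the hostname is illustrative and taken from the example above):

```sh
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.example.net"
```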
Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.
For example: `export MINIO_SERVER_URL="https://minio.example.net"`
| Dashboard | Creating a bucket |
| ------------- | ------------- |
|  |  |
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
## Upgrading MinIO
Upgrades require zero downtime in MinIO: all upgrades are non-disruptive and all transactions on MinIO are atomic, so upgrading all the servers simultaneously is the recommended way to upgrade MinIO.
> NOTE: Updating directly from <https://dl.min.io> requires internet access; optionally, you can host your own mirror at a location such as <https://my-artifactory.example.com/minio/>
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
```sh
mc admin update <minio alias, e.g., myminio>
```
- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io> and replace the existing MinIO binary, for example `/opt/bin/minio`, apply executable permissions with `chmod +x /opt/bin/minio`, and proceed to perform `mc admin service restart alias/` (see the sketch after this list).
- For installations using the Systemd MinIO service, upgrade via RPM/DEB packages **in parallel** on all servers, or replace the binary, for example `/opt/bin/minio`, on all nodes, apply executable permissions with `chmod +x /opt/bin/minio`, and proceed to perform `mc admin service restart alias/`.
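For the manual binary replacement described above, the per-node sequence looks roughly like this (a minimal sketch; the binary path `/opt/bin/minio` and the alias `myminio` are illustrative):

```sh
# Stage the new binary, then replace the existing one.
wget https://dl.min.io/server/minio/release/linux-amd64/minio -O /tmp/minio
mv /tmp/minio /opt/bin/minio
chmod +x /opt/bin/minio

# Restart all MinIO servers behind the alias in one step.
mc admin service restart myminio/
```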
### Upgrade Checklist
- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade; there is no forced requirement to upgrade to the latest release upon every release. Some releases may not be relevant to your setup; avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, the MinIO process must have write access to the parent directory where the binary is present on the host system.
- `mc admin update` is not supported and should be avoided in Kubernetes/container environments; please upgrade containers by upgrading the relevant container images.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines.**
## Explore Further
- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## License
- MinIO source is licensed under the GNU AGPLv3 license that can be found in the [LICENSE](https://github.com/minio/minio/blob/master/LICENSE) file.
- MinIO [Documentation](https://github.com/minio/minio/tree/master/docs) © 2021 by MinIO, Inc is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
- [License Compliance](https://github.com/minio/minio/blob/master/COMPLIANCE.md)
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/cespare/xxhash/v2"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
)
var (
// XL header specifies the format
xlHeader = [4]byte{'X', 'L', '2', ' '}
// Current version being written.
xlVersionCurrent [4]byte
)
//go:generate msgp -file=$GOFILE -unexported
//go:generate stringer -type VersionType,ErasureAlgo -output=xl-storage-format-v2_string.go $GOFILE
const (
// Breaking changes.
// Newer versions cannot be read by older software.
// This will prevent downgrades to incompatible versions.
xlVersionMajor = 1
// Non-breaking changes.
// Bumping this is informational, but should be done
// if any change is made to the data stored; bumping this
// will allow detecting the exact version later.
xlVersionMinor = 3
)
func init() {
binary.LittleEndian.PutUint16(xlVersionCurrent[0:2], xlVersionMajor)
binary.LittleEndian.PutUint16(xlVersionCurrent[2:4], xlVersionMinor)
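// With xlVersionMajor=1 and xlVersionMinor=3, this produces the little-endian bytes {0x01, 0x00, 0x03, 0x00}.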
}
// The []journal contains all the different versions of the object.
//
// This array can have 3 kinds of objects:
//
// ``object``: If the object is uploaded the usual way: putobject, multipart-put, copyobject
//
// ``delete``: This is the delete-marker
//
// ``legacyObject``: This is the legacy object in xlV1 format, preserved until it is overwritten
//
// The most recently updated element in the array is considered the latest version.
// In addition to these we have a special kind called free-version. This is represented
// using a delete-marker and MetaSys entries. It's used to track tiered content of a
// deleted/overwritten version. This version is visible _only_ to the scanner routine, for subsequent deletion.
// This kind of tracking is necessary since a version's tiered content is deleted asynchronously.
// Backend directory tree structure:
// disk1/
// └── bucket
//     └── object
//         ├── a192c1d5-9bd5-41fd-9a90-ab10e165398d
//         │   └── part.1
//         ├── c06e0436-f813-447e-ae5e-f2564df9dfd4
//         │   └── part.1
//         ├── df433928-2dcf-47b1-a786-43efa0f6b424
//         │   └── part.1
//         ├── legacy
//         │   └── part.1
//         └── xl.meta
// VersionType defines the journal type of the current entry.
type VersionType uint8
// List of the different journal types
const (
invalidVersionType VersionType = 0
ObjectType VersionType = 1
DeleteType VersionType = 2
LegacyType VersionType = 3
lastVersionType VersionType = 4
)
func (e VersionType) valid() bool {
return e > invalidVersionType && e < lastVersionType
}
// ErasureAlgo defines common type of different erasure algorithms
type ErasureAlgo uint8
// List of currently supported erasure coding algorithms
const (
invalidErasureAlgo ErasureAlgo = 0
ReedSolomon ErasureAlgo = 1
lastErasureAlgo ErasureAlgo = 2
)
func (e ErasureAlgo) valid() bool {
return e > invalidErasureAlgo && e < lastErasureAlgo
}
// ChecksumAlgo defines common type of different checksum algorithms
type ChecksumAlgo uint8
// List of currently supported checksum algorithms
const (
invalidChecksumAlgo ChecksumAlgo = 0
HighwayHash ChecksumAlgo = 1
lastChecksumAlgo ChecksumAlgo = 2
)
func (e ChecksumAlgo) valid() bool {
return e > invalidChecksumAlgo && e < lastChecksumAlgo
}
// xlMetaV2DeleteMarker defines the data struct for the delete marker journal type
type xlMetaV2DeleteMarker struct {
VersionID [16]byte `json:"ID" msg:"ID"` // Version ID for delete marker
ModTime int64 `json:"MTime" msg:"MTime"` // Object delete marker modified time
MetaSys map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,omitempty"` // Delete marker internal metadata
}
// xlMetaV2Object defines the data struct for object journal type
type xlMetaV2Object struct {
VersionID [16]byte `json:"ID" msg:"ID"` // Version ID
DataDir [16]byte `json:"DDir" msg:"DDir"` // Data dir ID
ErasureAlgorithm ErasureAlgo `json:"EcAlgo" msg:"EcAlgo"` // Erasure coding algorithm
ErasureM int `json:"EcM" msg:"EcM"` // Erasure data blocks
ErasureN int `json:"EcN" msg:"EcN"` // Erasure parity blocks
ErasureBlockSize int64 `json:"EcBSize" msg:"EcBSize"` // Erasure block size
ErasureIndex int `json:"EcIndex" msg:"EcIndex"` // Erasure disk index
ErasureDist []uint8 `json:"EcDist" msg:"EcDist"` // Erasure distribution
BitrotChecksumAlgo ChecksumAlgo `json:"CSumAlgo" msg:"CSumAlgo"` // Bitrot checksum algo
PartNumbers []int `json:"PartNums" msg:"PartNums"` // Part Numbers
PartETags []string `json:"PartETags" msg:"PartETags,allownil"` // Part ETags
PartSizes []int64 `json:"PartSizes" msg:"PartSizes"` // Part Sizes
PartActualSizes []int64 `json:"PartASizes,omitempty" msg:"PartASizes,allownil"` // Part ActualSizes (compression)
PartIndices [][]byte `json:"PartIndices,omitempty" msg:"PartIdx,omitempty"` // Part Indexes (compression)
Size int64 `json:"Size" msg:"Size"` // Object version size
ModTime int64 `json:"MTime" msg:"MTime"` // Object version modified time
MetaSys map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,allownil"` // Object version internal metadata
MetaUser map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,allownil"` // Object version metadata set by user
}
// xlMetaV2Version describes the journal entry. Type defines the current
// journal entry type; the other fields might be nil depending on what the
// Type field carries, so it is imperative for the caller to verify the
// journal type before accessing the rest of the fields.
type xlMetaV2Version struct {
Type VersionType `json:"Type" msg:"Type"`
ObjectV1 *xlMetaV1Object `json:"V1Obj,omitempty" msg:"V1Obj,omitempty"`
ObjectV2 *xlMetaV2Object `json:"V2Obj,omitempty" msg:"V2Obj,omitempty"`
DeleteMarker *xlMetaV2DeleteMarker `json:"DelObj,omitempty" msg:"DelObj,omitempty"`
WrittenByVersion uint64 `msg:"v"` // Tracks written by MinIO version
}
// xlFlags contains flags on the object.
// This can be extended up to 64 bits without breaking compatibility.
type xlFlags uint8
const (
xlFlagFreeVersion xlFlags = 1 << iota
xlFlagUsesDataDir
xlFlagInlineData
)
func (x xlFlags) String() string {
var s strings.Builder
if x&xlFlagFreeVersion != 0 {
s.WriteString("FreeVersion")
}
if x&xlFlagUsesDataDir != 0 {
if s.Len() > 0 {
s.WriteByte(',')
}
s.WriteString("UsesDD")
}
if x&xlFlagInlineData != 0 {
if s.Len() > 0 {
s.WriteByte(',')
}
s.WriteString("Inline")
}
return s.String()
}
// checkXL2V1 will check if the metadata has correct header and is a known major version.
// The remaining payload and versions are returned.
func checkXL2V1(buf []byte) (payload []byte, major, minor uint16, err error) {
if len(buf) <= 8 {
return payload, 0, 0, fmt.Errorf("xlMeta: no data")
}
if !bytes.Equal(buf[:4], xlHeader[:]) {
return payload, 0, 0, fmt.Errorf("xlMeta: unknown XLv2 header, expected %v, got %v", xlHeader[:4], buf[:4])
}
	if bytes.Equal(buf[4:8], []byte("1   ")) {
// Set as 1,0.
major, minor = 1, 0
} else {
major, minor = binary.LittleEndian.Uint16(buf[4:6]), binary.LittleEndian.Uint16(buf[6:8])
}
if major > xlVersionMajor {
return buf[8:], major, minor, fmt.Errorf("xlMeta: unknown major version %d found", major)
}
return buf[8:], major, minor, nil
}
func isXL2V1Format(buf []byte) bool {
_, _, _, err := checkXL2V1(buf)
return err == nil
}
//msgp:tuple xlMetaV2VersionHeader
type xlMetaV2VersionHeader struct {
VersionID [16]byte
ModTime int64
Signature [4]byte
Type VersionType
Flags xlFlags
}
func (x xlMetaV2VersionHeader) String() string {
return fmt.Sprintf("Type: %s, VersionID: %s, Signature: %s, ModTime: %s, Flags: %s",
x.Type.String(),
hex.EncodeToString(x.VersionID[:]),
hex.EncodeToString(x.Signature[:]),
time.Unix(0, x.ModTime),
x.Flags.String(),
)
}
// matchesNotStrict returns whether x and o have the same version ID and type.
// If the version ID is zero, the modtime must also match.
func (x xlMetaV2VersionHeader) matchesNotStrict(o xlMetaV2VersionHeader) bool {
if x.VersionID == [16]byte{} {
return x.VersionID == o.VersionID &&
x.Type == o.Type && o.ModTime == x.ModTime
}
return x.VersionID == o.VersionID &&
x.Type == o.Type
}
// sortsBefore can be used as a tiebreaker for stable sorting/selecting.
// Returns false on ties.
func (x xlMetaV2VersionHeader) sortsBefore(o xlMetaV2VersionHeader) bool {
if x == o {
return false
}
// Prefer newest modtime.
if x.ModTime != o.ModTime {
return x.ModTime > o.ModTime
}
// The following doesn't make too much sense, but we want sort to be consistent nonetheless.
// Prefer lower types
if x.Type != o.Type {
return x.Type < o.Type
}
// Consistent sort on signature
if v := bytes.Compare(x.Signature[:], o.Signature[:]); v != 0 {
return v > 0
}
// On ID mismatch
if v := bytes.Compare(x.VersionID[:], o.VersionID[:]); v != 0 {
return v > 0
}
// Flags
if x.Flags != o.Flags {
return x.Flags > o.Flags
}
return false
}
// Valid returns true if this xlMetaV2Version is valid.
func (j xlMetaV2Version) Valid() bool {
if !j.Type.valid() {
return false
}
switch j.Type {
case LegacyType:
return j.ObjectV1 != nil &&
j.ObjectV1.valid()
case ObjectType:
return j.ObjectV2 != nil &&
j.ObjectV2.ErasureAlgorithm.valid() &&
j.ObjectV2.BitrotChecksumAlgo.valid() &&
isXLMetaErasureInfoValid(j.ObjectV2.ErasureM, j.ObjectV2.ErasureN) &&
j.ObjectV2.ModTime > 0
case DeleteType:
return j.DeleteMarker != nil &&
j.DeleteMarker.ModTime > 0
}
return false
}
// header will return a shallow header of the version.
func (j *xlMetaV2Version) header() xlMetaV2VersionHeader {
var flags xlFlags
if j.FreeVersion() {
flags |= xlFlagFreeVersion
}
if j.Type == ObjectType && j.ObjectV2.UsesDataDir() {
flags |= xlFlagUsesDataDir
}
if j.Type == ObjectType && j.ObjectV2.InlineData() {
flags |= xlFlagInlineData
}
return xlMetaV2VersionHeader{
VersionID: j.getVersionID(),
ModTime: j.getModTime().UnixNano(),
Signature: j.getSignature(),
Type: j.Type,
Flags: flags,
}
}
// FreeVersion returns true if x represents a free-version, false otherwise.
func (x xlMetaV2VersionHeader) FreeVersion() bool {
return x.Flags&xlFlagFreeVersion != 0
}
// UsesDataDir returns true if this object version uses its data directory for
// its contents and false otherwise.
func (x xlMetaV2VersionHeader) UsesDataDir() bool {
return x.Flags&xlFlagUsesDataDir != 0
}
// InlineData returns whether inline data has been set.
// Note that false does not mean there is no inline data,
// only that it is unlikely.
func (x xlMetaV2VersionHeader) InlineData() bool {
return x.Flags&xlFlagInlineData != 0
}
// signatureErr is a signature returned when an error occurs.
var signatureErr = [4]byte{'e', 'r', 'r', 0}
// getSignature will return a signature that is expected to be the same across all disks.
func (j xlMetaV2Version) getSignature() [4]byte {
switch j.Type {
case ObjectType:
return j.ObjectV2.Signature()
case DeleteType:
return j.DeleteMarker.Signature()
case LegacyType:
return j.ObjectV1.Signature()
}
return signatureErr
}
// getModTime will return the ModTime of the underlying version.
func (j xlMetaV2Version) getModTime() time.Time {
switch j.Type {
case ObjectType:
return time.Unix(0, j.ObjectV2.ModTime)
case DeleteType:
return time.Unix(0, j.DeleteMarker.ModTime)
case LegacyType:
return j.ObjectV1.Stat.ModTime
}
return time.Time{}
}
// getVersionID will return the versionID of the underlying version.
func (j xlMetaV2Version) getVersionID() [16]byte {
switch j.Type {
case ObjectType:
return j.ObjectV2.VersionID
case DeleteType:
return j.DeleteMarker.VersionID
case LegacyType:
return [16]byte{}
}
return [16]byte{}
}
// ToFileInfo returns FileInfo of the underlying type.
func (j *xlMetaV2Version) ToFileInfo(volume, path string) (fi FileInfo, err error) {
if j == nil {
return fi, errFileNotFound
}
switch j.Type {
case ObjectType:
fi, err = j.ObjectV2.ToFileInfo(volume, path)
case DeleteType:
fi, err = j.DeleteMarker.ToFileInfo(volume, path)
case LegacyType:
fi, err = j.ObjectV1.ToFileInfo(volume, path)
default:
return fi, errFileNotFound
}
fi.WrittenByVersion = j.WrittenByVersion
return fi, err
}
const (
xlHeaderVersion = 2
xlMetaVersion = 2
)
func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) {
versionID := ""
var uv uuid.UUID
// check if the version is not "null"
if j.VersionID != uv {
versionID = uuid.UUID(j.VersionID).String()
}
fi := FileInfo{
Volume: volume,
Name: path,
ModTime: time.Unix(0, j.ModTime).UTC(),
VersionID: versionID,
Deleted: true,
}
fi.Metadata = make(map[string]string, len(j.MetaSys))
for k, v := range j.MetaSys {
fi.Metadata[k] = string(v)
}
fi.ReplicationState = GetInternalReplicationState(j.MetaSys)
if j.FreeVersion() {
fi.SetTierFreeVersion()
fi.TransitionTier = string(j.MetaSys[metaTierName])
fi.TransitionedObjName = string(j.MetaSys[metaTierObjName])
fi.TransitionVersionID = string(j.MetaSys[metaTierVersionID])
}
return fi, nil
}
// Signature will return a signature that is expected to be the same across all disks.
func (j *xlMetaV2DeleteMarker) Signature() [4]byte {
// Shallow copy
c := *j
// Marshal metadata
crc := hashDeterministicBytes(c.MetaSys)
c.MetaSys = nil
if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
crc ^= xxhash.Sum64(bts)
metaDataPoolPut(bts)
}
// Combine upper and lower part
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
return tmp
}
// UsesDataDir returns true if this object version uses its data directory for
// its contents and false otherwise.
func (j xlMetaV2Object) UsesDataDir() bool {
	// Skip if this version is not transitioned, i.e. it uses its data directory.
if !bytes.Equal(j.MetaSys[metaTierStatus], []byte(lifecycle.TransitionComplete)) {
return true
}
// Check if this transitioned object has been restored on disk.
return isRestoredObjectOnDisk(j.MetaUser)
}
// InlineData returns whether inline data has been set.
// Note that false does not mean there is no inline data,
// only that it is unlikely.
func (j xlMetaV2Object) InlineData() bool {
_, ok := j.MetaSys[ReservedMetadataPrefixLower+"inline-data"]
return ok
}
const (
metaTierStatus = ReservedMetadataPrefixLower + TransitionStatus
metaTierObjName = ReservedMetadataPrefixLower + TransitionedObjectName
metaTierVersionID = ReservedMetadataPrefixLower + TransitionedVersionID
metaTierName = ReservedMetadataPrefixLower + TransitionTier
)
func (j *xlMetaV2Object) SetTransition(fi FileInfo) {
j.MetaSys[metaTierStatus] = []byte(fi.TransitionStatus)
j.MetaSys[metaTierObjName] = []byte(fi.TransitionedObjName)
j.MetaSys[metaTierVersionID] = []byte(fi.TransitionVersionID)
j.MetaSys[metaTierName] = []byte(fi.TransitionTier)
}
func (j *xlMetaV2Object) RemoveRestoreHdrs() {
delete(j.MetaUser, xhttp.AmzRestore)
delete(j.MetaUser, xhttp.AmzRestoreExpiryDays)
delete(j.MetaUser, xhttp.AmzRestoreRequestDate)
}
// Signature will return a signature that is expected to be the same across all disks.
func (j *xlMetaV2Object) Signature() [4]byte {
// Shallow copy
c := *j
// Zero fields that will vary across disks
c.ErasureIndex = 0
	// Nil out zero-length 'allownil' fields, so we don't differentiate between nil and 0 len.
allEmpty := true
for _, tag := range c.PartETags {
if len(tag) != 0 {
allEmpty = false
break
}
}
if allEmpty {
c.PartETags = nil
}
if len(c.PartActualSizes) == 0 {
c.PartActualSizes = nil
}
// Get a 64 bit CRC
crc := hashDeterministicString(c.MetaUser)
crc ^= hashDeterministicBytes(c.MetaSys)
// Nil fields.
c.MetaSys = nil
c.MetaUser = nil
if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
crc ^= xxhash.Sum64(bts)
metaDataPoolPut(bts)
}
// Combine upper and lower part
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
return tmp
}
func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
versionID := ""
var uv uuid.UUID
// check if the version is not "null"
if j.VersionID != uv {
versionID = uuid.UUID(j.VersionID).String()
}
fi := FileInfo{
Volume: volume,
Name: path,
Size: j.Size,
ModTime: time.Unix(0, j.ModTime).UTC(),
VersionID: versionID,
}
fi.Parts = make([]ObjectPartInfo, len(j.PartNumbers))
for i := range fi.Parts {
fi.Parts[i].Number = j.PartNumbers[i]
fi.Parts[i].Size = j.PartSizes[i]
if len(j.PartETags) == len(fi.Parts) {
fi.Parts[i].ETag = j.PartETags[i]
}
fi.Parts[i].ActualSize = j.PartActualSizes[i]
if len(j.PartIndices) == len(fi.Parts) {
fi.Parts[i].Index = j.PartIndices[i]
}
}
fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
for i := range fi.Parts {
fi.Erasure.Checksums[i].PartNumber = fi.Parts[i].Number
switch j.BitrotChecksumAlgo {
case HighwayHash:
fi.Erasure.Checksums[i].Algorithm = HighwayHash256S
fi.Erasure.Checksums[i].Hash = []byte{}
default:
return FileInfo{}, fmt.Errorf("unknown BitrotChecksumAlgo: %v", j.BitrotChecksumAlgo)
}
}
fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
for k, v := range j.MetaUser {
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
fi.Metadata[k] = v
}
tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
tierFVMarkerKey := ReservedMetadataPrefixLower + tierFVMarker
for k, v := range j.MetaSys {
// Make sure we skip free-version-id, similar to AddVersion()
if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
			// Skip tierFVID, tierFVMarker keys; they're used
			// only for creating free-versions.
switch k {
case tierFVIDKey, tierFVMarkerKey:
continue
}
}
switch {
case strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower), equals(k, VersionPurgeStatusKey):
fi.Metadata[k] = string(v)
}
}
fi.ReplicationState = getInternalReplicationState(fi.Metadata)
fi.Deleted = !fi.VersionPurgeStatus().Empty()
replStatus := fi.ReplicationState.CompositeReplicationStatus()
if replStatus != "" {
fi.Metadata[xhttp.AmzBucketReplicationStatus] = string(replStatus)
}
fi.Erasure.Algorithm = j.ErasureAlgorithm.String()
fi.Erasure.Index = j.ErasureIndex
fi.Erasure.BlockSize = j.ErasureBlockSize
fi.Erasure.DataBlocks = j.ErasureM
fi.Erasure.ParityBlocks = j.ErasureN
fi.Erasure.Distribution = make([]int, len(j.ErasureDist))
for i := range j.ErasureDist {
fi.Erasure.Distribution[i] = int(j.ErasureDist[i])
}
fi.DataDir = uuid.UUID(j.DataDir).String()
if st, ok := j.MetaSys[metaTierStatus]; ok {
fi.TransitionStatus = string(st)
}
if o, ok := j.MetaSys[metaTierObjName]; ok {
fi.TransitionedObjName = string(o)
}
if rv, ok := j.MetaSys[metaTierVersionID]; ok {
fi.TransitionVersionID = string(rv)
}
if sc, ok := j.MetaSys[metaTierName]; ok {
fi.TransitionTier = string(sc)
}
if crcs := j.MetaSys[ReservedMetadataPrefixLower+"crc"]; len(crcs) > 0 {
fi.Checksum = crcs
}
return fi, nil
}
// Read at most this much on initial read.
const metaDataReadDefault = 4 << 10
// Return used metadata byte slices here.
var metaDataPool = sync.Pool{New: func() interface{} { return make([]byte, 0, metaDataReadDefault) }}
// metaDataPoolGet will return a byte slice with capacity at least metaDataReadDefault.
// It will be length 0.
func metaDataPoolGet() []byte {
return metaDataPool.Get().([]byte)[:0]
}
// metaDataPoolPut will put an unused small buffer back into the pool.
func metaDataPoolPut(buf []byte) {
if cap(buf) >= metaDataReadDefault && cap(buf) < metaDataReadDefault*4 {
//nolint:staticcheck // SA6002 we are fine with the tiny alloc
metaDataPool.Put(buf)
}
}
// readXLMetaNoData will load the metadata, but skip data segments.
// This should only be used when data is never interesting.
// If data is not xlv2, it is returned in full.
func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
initial := size
hasFull := true
if initial > metaDataReadDefault {
initial = metaDataReadDefault
hasFull = false
}
buf := metaDataPoolGet()[:initial]
_, err := io.ReadFull(r, buf)
if err != nil {
return nil, fmt.Errorf("readXLMetaNoData(io.ReadFull): %w", err)
}
readMore := func(n int64) error {
has := int64(len(buf))
if has >= n {
return nil
}
if hasFull || n > size {
return io.ErrUnexpectedEOF
}
extra := n - has
if int64(cap(buf)) >= n {
// Extend since we have enough space.
buf = buf[:n]
} else {
buf = append(buf, make([]byte, extra)...)
}
_, err := io.ReadFull(r, buf[has:])
if err != nil {
if errors.Is(err, io.EOF) {
// Returned if we read nothing.
err = io.ErrUnexpectedEOF
}
return fmt.Errorf("readXLMetaNoData(readMore): %w", err)
}
return nil
}
tmp, major, minor, err := checkXL2V1(buf)
if err != nil {
err = readMore(size)
return buf, err
}
switch major {
case 1:
switch minor {
case 0:
err = readMore(size)
return buf, err
case 1, 2, 3:
sz, tmp, err := msgp.ReadBytesHeader(tmp)
if err != nil {
return nil, fmt.Errorf("readXLMetaNoData(read_meta): uknown metadata version %w", err)
}
want := int64(sz) + int64(len(buf)-len(tmp))
// v1.1 does not have CRC.
if minor < 2 {
if err := readMore(want); err != nil {
return nil, err
}
return buf[:want], nil
}
// CRC is variable length, so we need to truncate exactly that.
wantMax := want + msgp.Uint32Size
if wantMax > size {
wantMax = size
}
if err := readMore(wantMax); err != nil {
return nil, err
}
if int64(len(buf)) < want {
return nil, fmt.Errorf("buffer shorter than expected (buflen: %d, want: %d): %w", len(buf), want, errFileCorrupt)
}
tmp = buf[want:]
_, after, err := msgp.ReadUint32Bytes(tmp)
if err != nil {
return nil, fmt.Errorf("readXLMetaNoData(read_meta): unknown metadata version %w", err)
}
want += int64(len(tmp) - len(after))
return buf[:want], err
default:
return nil, errors.New("unknown minor metadata version")
}
default:
return nil, errors.New("unknown major metadata version")
}
}
func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, err error) {
hdrVer, buf, err := msgp.ReadUint8Bytes(buf)
if err != nil {
return 0, 0, 0, buf, err
}
metaVer, buf, err := msgp.ReadUint8Bytes(buf)
if err != nil {
return 0, 0, 0, buf, err
}
if hdrVer > xlHeaderVersion {
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", metaVer)
}
if metaVer > xlMetaVersion {
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer)
}
versions, buf, err = msgp.ReadIntBytes(buf)
if err != nil {
return 0, 0, 0, buf, err
}
if versions < 0 {
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions)
}
return versions, hdrVer, metaVer, buf, nil
}
// decodeVersions will decode a number of versions from a buffer
// and perform a callback for each version in order, newest first.
// Return errDoneForNow to stop processing and return nil.
// Any non-nil error is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
var tHdr, tMeta []byte // Zero copy bytes
for i := 0; i < versions; i++ {
tHdr, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return err
}
tMeta, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return err
}
if err = fn(i, tHdr, tMeta); err != nil {
if err == errDoneForNow {
err = nil
}
return err
}
}
return nil
}
// isIndexedMetaV2 returns a non-nil result if the metadata is indexed.
// Returns 3x nil if not XLV2 or not indexed.
// If indexed but unable to parse, an error will be returned.
func isIndexedMetaV2(buf []byte) (meta xlMetaBuf, data xlMetaInlineData, err error) {
buf, major, minor, err := checkXL2V1(buf)
if err != nil || major != 1 || minor < 3 {
return nil, nil, nil
}
meta, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return nil, nil, err
}
if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
// Read metadata CRC
buf = nbuf
if got := uint32(xxhash.Sum64(meta)); got != crc {
return nil, nil, fmt.Errorf("xlMetaV2.Load version(%d), CRC mismatch, want 0x%x, got 0x%x", minor, crc, got)
}
} else {
return nil, nil, err
}
data = buf
if data.validate() != nil {
data.repair()
}
return meta, data, nil
}
type xlMetaV2ShallowVersion struct {
header xlMetaV2VersionHeader
meta []byte
}
//msgp:ignore xlMetaV2 xlMetaV2ShallowVersion
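// xlMetaV2 is the in-memory representation of 'xl.meta':
// a list of shallow versions (decoded header plus raw msgp metadata),
// kept sorted by modtime with the newest version at index 0,
// plus any inline data and the metadata version it was read with.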
type xlMetaV2 struct {
versions []xlMetaV2ShallowVersion
// data will contain raw data if any.
// data will be one or more versions indexed by versionID.
// To remove all data set to nil.
data xlMetaInlineData
// metadata version.
metaV uint8
}
// LoadOrConvert will load the metadata in the buffer.
// If this is a legacy format, it will automatically be converted to XLV2.
func (x *xlMetaV2) LoadOrConvert(buf []byte) error {
if isXL2V1Format(buf) {
return x.Load(buf)
}
xlMeta := &xlMetaV1Object{}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(buf, xlMeta); err != nil {
return errFileCorrupt
}
if len(x.versions) > 0 {
x.versions = x.versions[:0]
}
x.data = nil
x.metaV = xlMetaVersion
return x.AddLegacy(xlMeta)
}
// Load all versions of the stored data.
// Note that references to the incoming buffer will be kept.
func (x *xlMetaV2) Load(buf []byte) error {
if meta, data, err := isIndexedMetaV2(buf); err != nil {
return err
} else if meta != nil {
return x.loadIndexed(meta, data)
}
// Convert older format.
return x.loadLegacy(buf)
}
func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error {
versions, headerV, metaV, buf, err := decodeXLHeaders(buf)
if err != nil {
return err
}
if cap(x.versions) < versions {
x.versions = make([]xlMetaV2ShallowVersion, 0, versions+1)
}
x.versions = x.versions[:versions]
x.data = data
x.metaV = metaV
if err = x.data.validate(); err != nil {
x.data.repair()
logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries()))
}
return decodeVersions(buf, versions, func(i int, hdr, meta []byte) error {
ver := &x.versions[i]
_, err = ver.header.unmarshalV(headerV, hdr)
if err != nil {
return err
}
ver.meta = meta
// Fix inconsistent x-minio-internal-replication-timestamp by loading and reindexing.
if metaV < 2 && ver.header.Type == DeleteType {
// load (and convert) version.
version, err := x.getIdx(i)
if err == nil {
// Only reindex if set.
_, ok1 := version.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp]
_, ok2 := version.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp]
if ok1 || ok2 {
meta, err := version.MarshalMsg(make([]byte, 0, len(ver.meta)+10))
if err == nil {
// Override both if fine.
ver.header = version.header()
ver.meta = meta
}
}
}
}
return nil
})
}
// loadLegacy will load content prior to v1.3
// Note that references to the incoming buffer will be kept.
func (x *xlMetaV2) loadLegacy(buf []byte) error {
buf, major, minor, err := checkXL2V1(buf)
if err != nil {
return fmt.Errorf("xlMetaV2.Load %w", err)
}
var allMeta []byte
switch major {
case 1:
switch minor {
case 0:
allMeta = buf
case 1, 2:
v, buf, err := msgp.ReadBytesZC(buf)
if err != nil {
return fmt.Errorf("xlMetaV2.Load version(%d), bufLen(%d) %w", minor, len(buf), err)
}
if minor >= 2 {
if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
// Read metadata CRC (added in v2)
buf = nbuf
if got := uint32(xxhash.Sum64(v)); got != crc {
return fmt.Errorf("xlMetaV2.Load version(%d), CRC mismatch, want 0x%x, got 0x%x", minor, crc, got)
}
} else {
return fmt.Errorf("xlMetaV2.Load version(%d), loading CRC: %w", minor, err)
}
}
allMeta = v
// Add remaining data.
x.data = buf
if err = x.data.validate(); err != nil {
x.data.repair()
logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries()))
}
default:
return errors.New("unknown minor metadata version")
}
default:
return errors.New("unknown major metadata version")
}
if allMeta == nil {
return errFileCorrupt
}
// bts will shrink as we decode.
bts := allMeta
var field []byte
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
return msgp.WrapError(err, "loadLegacy.ReadMapHeader")
}
var tmp xlMetaV2Version
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
return msgp.WrapError(err, "loadLegacy.ReadMapKey")
}
switch msgp.UnsafeString(field) {
case "Versions":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return msgp.WrapError(err, "Versions")
}
if cap(x.versions) >= int(zb0002) {
x.versions = (x.versions)[:zb0002]
} else {
x.versions = make([]xlMetaV2ShallowVersion, zb0002, zb0002+1)
}
for za0001 := range x.versions {
start := len(allMeta) - len(bts)
bts, err = tmp.unmarshalV(1, bts)
if err != nil {
return msgp.WrapError(err, "Versions", za0001)
}
end := len(allMeta) - len(bts)
// We reference the marshaled data, so we don't have to re-marshal.
x.versions[za0001] = xlMetaV2ShallowVersion{
header: tmp.header(),
meta: allMeta[start:end],
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
return msgp.WrapError(err, "loadLegacy.Skip")
}
}
}
x.metaV = 1 // Fixed for legacy conversions.
x.sortByModTime()
return nil
}
// latestModtime returns the modtime of the latest version.
func (x *xlMetaV2) latestModtime() time.Time {
if x == nil || len(x.versions) == 0 {
return time.Time{}
}
return time.Unix(0, x.versions[0].header.ModTime)
}
func (x *xlMetaV2) addVersion(ver xlMetaV2Version) error {
modTime := ver.getModTime().UnixNano()
if !ver.Valid() {
return errors.New("attempted to add invalid version")
}
encoded, err := ver.MarshalMsg(nil)
if err != nil {
return err
}
// Add space at the end.
	// The placeholder has modtime -1, so the new version will always be inserted before it.
x.versions = append(x.versions, xlMetaV2ShallowVersion{header: xlMetaV2VersionHeader{ModTime: -1}})
// Linear search, we likely have to insert at front.
for i, existing := range x.versions {
if existing.header.ModTime <= modTime {
// Insert at current idx. First move current back.
copy(x.versions[i+1:], x.versions[i:])
x.versions[i] = xlMetaV2ShallowVersion{
header: ver.header(),
meta: encoded,
}
return nil
}
}
return fmt.Errorf("addVersion: Internal error, unable to add version")
}
// AppendTo will marshal the data in x and append it to the provided slice.
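//
// The layout written here (and read back by checkXL2V1, decodeXLHeaders and
// isIndexedMetaV2) is:
//
//	xlHeader (4 bytes) | xlVersionCurrent (4 bytes) |
//	bin32-framed payload: header version, meta version, version count,
//	followed by one (version header bytes, version meta bytes) pair per version |
//	CRC of the payload as msgp uint32 (5 bytes) | inline data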
func (x *xlMetaV2) AppendTo(dst []byte) ([]byte, error) {
// Header...
sz := len(xlHeader) + len(xlVersionCurrent) + msgp.ArrayHeaderSize + len(dst) + 3*msgp.Uint32Size
// Existing + Inline data
sz += len(dst) + len(x.data)
// Versions...
for _, ver := range x.versions {
sz += 32 + len(ver.meta)
}
if cap(dst) < sz {
buf := make([]byte, len(dst), sz)
copy(buf, dst)
dst = buf
}
if err := x.data.validate(); err != nil {
return nil, err
}
dst = append(dst, xlHeader[:]...)
dst = append(dst, xlVersionCurrent[:]...)
// Add "bin 32" type header to always have enough space.
// We will fill out the correct size when we know it.
dst = append(dst, 0xc6, 0, 0, 0, 0)
dataOffset := len(dst)
dst = msgp.AppendUint(dst, xlHeaderVersion)
dst = msgp.AppendUint(dst, xlMetaVersion)
dst = msgp.AppendInt(dst, len(x.versions))
tmp := metaDataPoolGet()
defer metaDataPoolPut(tmp)
for _, ver := range x.versions {
var err error
// Add header
tmp, err = ver.header.MarshalMsg(tmp[:0])
if err != nil {
return nil, err
}
dst = msgp.AppendBytes(dst, tmp)
// Add full meta
dst = msgp.AppendBytes(dst, ver.meta)
}
// Update size...
binary.BigEndian.PutUint32(dst[dataOffset-4:dataOffset], uint32(len(dst)-dataOffset))
// Add CRC of metadata as fixed size (5 bytes)
// Prior to v1.3 this was variable sized.
tmp = tmp[:5]
tmp[0] = 0xce // muint32
binary.BigEndian.PutUint32(tmp[1:], uint32(xxhash.Sum64(dst[dataOffset:])))
dst = append(dst, tmp[:5]...)
return append(dst, x.data...), nil
}
func (x *xlMetaV2) findVersion(key [16]byte) (idx int, ver *xlMetaV2Version, err error) {
for i, ver := range x.versions {
if key == ver.header.VersionID {
obj, err := x.getIdx(i)
return i, obj, err
}
}
return -1, nil, errFileVersionNotFound
}
func (x *xlMetaV2) getIdx(idx int) (ver *xlMetaV2Version, err error) {
if idx < 0 || idx >= len(x.versions) {
return nil, errFileNotFound
}
var dst xlMetaV2Version
_, err = dst.unmarshalV(x.metaV, x.versions[idx].meta)
if false {
if err == nil && x.versions[idx].header.VersionID != dst.getVersionID() {
panic(fmt.Sprintf("header: %x != object id: %x", x.versions[idx].header.VersionID, dst.getVersionID()))
}
}
return &dst, err
}
// setIdx will replace a version at a given index.
// Note that versions may become re-sorted if modtime changes.
func (x *xlMetaV2) setIdx(idx int, ver xlMetaV2Version) (err error) {
if idx < 0 || idx >= len(x.versions) {
return errFileNotFound
}
update := &x.versions[idx]
prevMod := update.header.ModTime
update.meta, err = ver.MarshalMsg(update.meta[:0:len(update.meta)])
if err != nil {
update.meta = nil
return err
}
update.header = ver.header()
if prevMod != update.header.ModTime {
x.sortByModTime()
}
return nil
}
// getDataDirs will return all data directories in the metadata
// as well as all version ids used for inline data.
func (x *xlMetaV2) getDataDirs() ([]string, error) {
	dds := make([]string, 0, len(x.versions)*2)
for i, ver := range x.versions {
if ver.header.Type == DeleteType {
continue
}
obj, err := x.getIdx(i)
if err != nil {
return nil, err
}
switch ver.header.Type {
case ObjectType:
if obj.ObjectV2 == nil {
return nil, errors.New("obj.ObjectV2 unexpectedly nil")
}
dds = append(dds, uuid.UUID(obj.ObjectV2.DataDir).String())
if obj.ObjectV2.VersionID == [16]byte{} {
dds = append(dds, nullVersionID)
} else {
dds = append(dds, uuid.UUID(obj.ObjectV2.VersionID).String())
}
case LegacyType:
if obj.ObjectV1 == nil {
return nil, errors.New("obj.ObjectV1 unexpectedly nil")
}
dds = append(dds, obj.ObjectV1.DataDir)
}
}
return dds, nil
}
// sortByModTime will sort versions by modtime in descending order,
// meaning index 0 will be latest version.
func (x *xlMetaV2) sortByModTime() {
// Quick check
if len(x.versions) <= 1 || sort.SliceIsSorted(x.versions, func(i, j int) bool {
return x.versions[i].header.sortsBefore(x.versions[j].header)
}) {
return
}
// We should sort.
sort.Slice(x.versions, func(i, j int) bool {
return x.versions[i].header.sortsBefore(x.versions[j].header)
})
}
// DeleteVersion deletes the version specified by version id.
// It returns to the caller which dataDir to delete; an empty
// string means no dataDir should be removed.
func (x *xlMetaV2) DeleteVersion(fi FileInfo) (string, error) {
	// This is a situation where versionId is explicitly
	// specified as "null"; since we do not save the "null"
	// string it is considered empty. But empty also
	// means the version which matches will be purged.
if fi.VersionID == nullVersionID {
fi.VersionID = ""
}
var uv uuid.UUID
var err error
if fi.VersionID != "" {
uv, err = uuid.Parse(fi.VersionID)
if err != nil {
return "", errFileVersionNotFound
}
}
var ventry xlMetaV2Version
if fi.Deleted {
ventry = xlMetaV2Version{
Type: DeleteType,
DeleteMarker: &xlMetaV2DeleteMarker{
VersionID: uv,
ModTime: fi.ModTime.UnixNano(),
MetaSys: make(map[string][]byte),
},
WrittenByVersion: globalVersionUnix,
}
if !ventry.Valid() {
return "", errors.New("internal error: invalid version entry generated")
}
}
updateVersion := false
if fi.VersionPurgeStatus().Empty() && (fi.DeleteMarkerReplicationStatus() == "REPLICA" || fi.DeleteMarkerReplicationStatus().Empty()) {
updateVersion = fi.MarkDeleted
} else {
// for replication scenario
if fi.Deleted && fi.VersionPurgeStatus() != Complete {
if !fi.VersionPurgeStatus().Empty() || fi.DeleteMarkerReplicationStatus().Empty() {
updateVersion = true
}
}
// object or delete-marker versioned delete is not complete
if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != Complete {
updateVersion = true
}
}
if fi.Deleted {
if !fi.DeleteMarkerReplicationStatus().Empty() {
switch fi.DeleteMarkerReplicationStatus() {
case replication.Replica:
ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(fi.ReplicationState.ReplicaStatus)
ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.UTC().Format(time.RFC3339Nano))
default:
ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.UTC().Format(time.RFC3339Nano))
}
}
if !fi.VersionPurgeStatus().Empty() {
ventry.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
}
for k, v := range fi.ReplicationState.ResetStatusesMap {
ventry.DeleteMarker.MetaSys[k] = []byte(v)
}
}
for i, ver := range x.versions {
if ver.header.VersionID != uv {
continue
}
switch ver.header.Type {
case LegacyType:
ver, err := x.getIdx(i)
if err != nil {
return "", err
}
x.versions = append(x.versions[:i], x.versions[i+1:]...)
if fi.Deleted {
err = x.addVersion(ventry)
}
return ver.ObjectV1.DataDir, err
case DeleteType:
if updateVersion {
ver, err := x.getIdx(i)
if err != nil {
return "", err
}
if len(ver.DeleteMarker.MetaSys) == 0 {
ver.DeleteMarker.MetaSys = make(map[string][]byte)
}
if !fi.DeleteMarkerReplicationStatus().Empty() {
switch fi.DeleteMarkerReplicationStatus() {
case replication.Replica:
ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(fi.ReplicationState.ReplicaStatus)
ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.UTC().Format(time.RFC3339Nano))
default:
ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.UTC().Format(time.RFC3339Nano))
}
}
if !fi.VersionPurgeStatus().Empty() {
ver.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
}
for k, v := range fi.ReplicationState.ResetStatusesMap {
ver.DeleteMarker.MetaSys[k] = []byte(v)
}
err = x.setIdx(i, *ver)
return "", err
}
var err error
x.versions = append(x.versions[:i], x.versions[i+1:]...)
if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != Complete)) {
err = x.addVersion(ventry)
}
return "", err
case ObjectType:
if updateVersion && !fi.Deleted {
ver, err := x.getIdx(i)
if err != nil {
return "", err
}
ver.ObjectV2.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
for k, v := range fi.ReplicationState.ResetStatusesMap {
ver.ObjectV2.MetaSys[k] = []byte(v)
}
err = x.setIdx(i, *ver)
return uuid.UUID(ver.ObjectV2.DataDir).String(), err
}
}
}
for i, version := range x.versions {
if version.header.Type != ObjectType || version.header.VersionID != uv {
continue
}
ver, err := x.getIdx(i)
if err != nil {
return "", err
}
switch {
case fi.ExpireRestored:
ver.ObjectV2.RemoveRestoreHdrs()
err = x.setIdx(i, *ver)
case fi.TransitionStatus == lifecycle.TransitionComplete:
ver.ObjectV2.SetTransition(fi)
err = x.setIdx(i, *ver)
default:
x.versions = append(x.versions[:i], x.versions[i+1:]...)
// if uv has tiered content we add a
// free-version to track it for
// asynchronous deletion via scanner.
if freeVersion, toFree := ver.ObjectV2.InitFreeVersion(fi); toFree {
err = x.addVersion(freeVersion)
}
}
logger.LogIf(context.Background(), err)
if fi.Deleted {
err = x.addVersion(ventry)
}
if x.SharedDataDirCount(ver.ObjectV2.VersionID, ver.ObjectV2.DataDir) > 0 {
// Found that another version references the same dataDir
// we shouldn't remove it, and only remove the version instead
return "", nil
}
return uuid.UUID(ver.ObjectV2.DataDir).String(), err
}
if fi.Deleted {
err = x.addVersion(ventry)
return "", err
}
return "", errFileVersionNotFound
}
// xlMetaDataDirDecoder is a shallow decoder for decoding object datadir only.
type xlMetaDataDirDecoder struct {
ObjectV2 *struct {
DataDir [16]byte `msg:"DDir"` // Data dir ID
} `msg:"V2Obj,omitempty"`
}
// UpdateObjectVersion updates metadata and modTime for a given
// versionID. NOTE: the versionID must be valid and should exist,
// and must not refer to a DeleteMarker or legacy object; if no
// versionID is specified the 'null' versionID is updated instead.
//
// It is the caller's responsibility to set a correct versionID; this
// function shouldn't be further extended to update immutable
// values such as ErasureInfo and ChecksumInfo.
//
// Metadata is only updated with new values; existing values stay
// as-is. If you wish to clear existing metadata, update all
// metadata freshly before calling this function.
func (x *xlMetaV2) UpdateObjectVersion(fi FileInfo) error {
if fi.VersionID == "" {
		// this means versioning is not yet enabled or is
		// suspended, i.e. all versions are basically the
		// default value, i.e. "null"
fi.VersionID = nullVersionID
}
var uv uuid.UUID
var err error
if fi.VersionID != "" && fi.VersionID != nullVersionID {
uv, err = uuid.Parse(fi.VersionID)
if err != nil {
return err
}
}
for i, version := range x.versions {
switch version.header.Type {
case LegacyType, DeleteType:
if version.header.VersionID == uv {
return errMethodNotAllowed
}
case ObjectType:
if version.header.VersionID == uv {
ver, err := x.getIdx(i)
if err != nil {
return err
}
for k, v := range fi.Metadata {
if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
ver.ObjectV2.MetaSys[k] = []byte(v)
} else {
ver.ObjectV2.MetaUser[k] = v
}
}
if !fi.ModTime.IsZero() {
ver.ObjectV2.ModTime = fi.ModTime.UnixNano()
}
return x.setIdx(i, *ver)
}
}
}
return errFileVersionNotFound
}
// AddVersion adds a new version
func (x *xlMetaV2) AddVersion(fi FileInfo) error {
if fi.VersionID == "" {
		// this means versioning is not yet enabled or is
		// suspended, i.e. all versions are basically the
		// default value, i.e. "null"
fi.VersionID = nullVersionID
}
var uv uuid.UUID
var err error
if fi.VersionID != "" && fi.VersionID != nullVersionID {
uv, err = uuid.Parse(fi.VersionID)
if err != nil {
return err
}
}
var dd uuid.UUID
if fi.DataDir != "" {
dd, err = uuid.Parse(fi.DataDir)
if err != nil {
return err
}
}
ventry := xlMetaV2Version{
WrittenByVersion: globalVersionUnix,
}
if fi.Deleted {
ventry.Type = DeleteType
ventry.DeleteMarker = &xlMetaV2DeleteMarker{
VersionID: uv,
ModTime: fi.ModTime.UnixNano(),
MetaSys: make(map[string][]byte),
}
} else {
ventry.Type = ObjectType
ventry.ObjectV2 = &xlMetaV2Object{
VersionID: uv,
DataDir: dd,
Size: fi.Size,
ModTime: fi.ModTime.UnixNano(),
ErasureAlgorithm: ReedSolomon,
ErasureM: fi.Erasure.DataBlocks,
ErasureN: fi.Erasure.ParityBlocks,
ErasureBlockSize: fi.Erasure.BlockSize,
ErasureIndex: fi.Erasure.Index,
BitrotChecksumAlgo: HighwayHash,
ErasureDist: make([]uint8, len(fi.Erasure.Distribution)),
PartNumbers: make([]int, len(fi.Parts)),
PartETags: nil,
PartSizes: make([]int64, len(fi.Parts)),
PartActualSizes: make([]int64, len(fi.Parts)),
MetaSys: make(map[string][]byte),
MetaUser: make(map[string]string, len(fi.Metadata)),
}
for i := range fi.Parts {
// Only add etags if any.
if fi.Parts[i].ETag != "" {
ventry.ObjectV2.PartETags = make([]string, len(fi.Parts))
break
}
}
for i := range fi.Parts {
// Only add indices if any.
if len(fi.Parts[i].Index) > 0 {
ventry.ObjectV2.PartIndices = make([][]byte, len(fi.Parts))
break
}
}
for i := range fi.Erasure.Distribution {
ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
}
for i := range fi.Parts {
ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
if len(ventry.ObjectV2.PartETags) > 0 && fi.Parts[i].ETag != "" {
ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
}
ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
if len(ventry.ObjectV2.PartIndices) > 0 {
ventry.ObjectV2.PartIndices[i] = fi.Parts[i].Index
}
}
tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
tierFVMarkerKey := ReservedMetadataPrefixLower + tierFVMarker
for k, v := range fi.Metadata {
if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
				// Skip tierFVID, tierFVMarker keys; they're used
				// only for creating free-versions.
				// Skip xMinIOHealing, it's used only in RenameData
switch k {
case tierFVIDKey, tierFVMarkerKey, xMinIOHealing:
continue
}
ventry.ObjectV2.MetaSys[k] = []byte(v)
} else {
ventry.ObjectV2.MetaUser[k] = v
}
}
// If asked to save data.
if len(fi.Data) > 0 || fi.Size == 0 {
x.data.replace(fi.VersionID, fi.Data)
}
if fi.TransitionStatus != "" {
ventry.ObjectV2.MetaSys[metaTierStatus] = []byte(fi.TransitionStatus)
}
if fi.TransitionedObjName != "" {
ventry.ObjectV2.MetaSys[metaTierObjName] = []byte(fi.TransitionedObjName)
}
if fi.TransitionVersionID != "" {
ventry.ObjectV2.MetaSys[metaTierVersionID] = []byte(fi.TransitionVersionID)
}
if fi.TransitionTier != "" {
ventry.ObjectV2.MetaSys[metaTierName] = []byte(fi.TransitionTier)
}
if len(fi.Checksum) > 0 {
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+"crc"] = fi.Checksum
}
}
if !ventry.Valid() {
return errors.New("internal error: invalid version entry generated")
}
// Check if we should replace first.
for i := range x.versions {
if x.versions[i].header.VersionID != uv {
continue
}
switch x.versions[i].header.Type {
case LegacyType:
// This would convert legacy type into new ObjectType
// this means that we are basically purging the `null`
// version of the object.
return x.setIdx(i, ventry)
case ObjectType:
return x.setIdx(i, ventry)
case DeleteType:
			// Allow a delete marker to be replaced with a proper
			// object data type as well; this is not S3-compliant
			// behavior but is kept here for future flexibility.
return x.setIdx(i, ventry)
}
}
// We did not find it, add it.
return x.addVersion(ventry)
}
func (x *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int {
	// If the v2 object is inlined, skip the dataDir share check.
if x.data.entries() > 0 && x.data.find(uuid.UUID(versionID).String()) != nil {
return 0
}
var sameDataDirCount int
var decoded xlMetaDataDirDecoder
for _, version := range x.versions {
if version.header.Type != ObjectType || version.header.VersionID == versionID || !version.header.UsesDataDir() {
continue
}
_, err := decoded.UnmarshalMsg(version.meta)
if err != nil || decoded.ObjectV2 == nil || decoded.ObjectV2.DataDir != dataDir {
continue
}
sameDataDirCount++
}
return sameDataDirCount
}
func (x *xlMetaV2) SharedDataDirCountStr(versionID, dataDir string) int {
var (
uv uuid.UUID
ddir uuid.UUID
err error
)
if versionID == nullVersionID {
versionID = ""
}
if versionID != "" {
uv, err = uuid.Parse(versionID)
if err != nil {
return 0
}
}
ddir, err = uuid.Parse(dataDir)
if err != nil {
return 0
}
return x.SharedDataDirCount(uv, ddir)
}
// AddLegacy adds a legacy version. It is only called when no prior
// versions exist, so it is safe to use from only one function in
// xl-storage (RenameData).
func (x *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
if !m.valid() {
return errFileCorrupt
}
m.VersionID = nullVersionID
return x.addVersion(xlMetaV2Version{ObjectV1: m, Type: LegacyType, WrittenByVersion: globalVersionUnix})
}
// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
// for consumption across callers.
func (x xlMetaV2) ToFileInfo(volume, path, versionID string, inclFreeVers bool) (fi FileInfo, err error) {
var uv uuid.UUID
if versionID != "" && versionID != nullVersionID {
uv, err = uuid.Parse(versionID)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
return fi, errFileVersionNotFound
}
}
var succModTime int64
isLatest := true
nonFreeVersions := len(x.versions)
var (
freeFi FileInfo
freeFound bool
)
found := false
for _, ver := range x.versions {
header := &ver.header
// skip listing free-version unless explicitly requested via versionID
if header.FreeVersion() {
nonFreeVersions--
			// remember the latest free version; this FileInfo is returned if no non-free versions remain
var freeVersion xlMetaV2Version
if inclFreeVers && !freeFound {
// ignore unmarshalling errors, will return errFileNotFound in that case
if _, err := freeVersion.unmarshalV(x.metaV, ver.meta); err == nil {
if freeFi, err = freeVersion.ToFileInfo(volume, path); err == nil {
freeFi.IsLatest = true // when this is returned, it would be the latest free version remaining.
freeFound = true
}
}
}
if header.VersionID != uv {
continue
}
}
if found {
continue
}
// We need a specific version, skip...
if versionID != "" && uv != header.VersionID {
isLatest = false
succModTime = header.ModTime
continue
}
// We found what we need.
found = true
var version xlMetaV2Version
if _, err := version.unmarshalV(x.metaV, ver.meta); err != nil {
return fi, err
}
if fi, err = version.ToFileInfo(volume, path); err != nil {
return fi, err
}
fi.IsLatest = isLatest
if succModTime != 0 {
fi.SuccessorModTime = time.Unix(0, succModTime)
}
}
if !found {
if versionID == "" {
if inclFreeVers && nonFreeVersions == 0 {
if freeFound {
return freeFi, nil
}
}
return FileInfo{}, errFileNotFound
}
return FileInfo{}, errFileVersionNotFound
}
fi.NumVersions = nonFreeVersions
return fi, err
}
// ListVersions lists current versions and current deleted
// versions. It returns an error for unexpected entries.
func (x xlMetaV2) ListVersions(volume, path string) ([]FileInfo, error) {
versions := make([]FileInfo, 0, len(x.versions))
var err error
var dst xlMetaV2Version
for _, version := range x.versions {
_, err = dst.unmarshalV(x.metaV, version.meta)
if err != nil {
return versions, err
}
fi, err := dst.ToFileInfo(volume, path)
if err != nil {
return versions, err
}
fi.NumVersions = len(x.versions)
versions = append(versions, fi)
}
for i := range versions {
versions[i].NumVersions = len(versions)
if i > 0 {
versions[i].SuccessorModTime = versions[i-1].ModTime
}
}
if len(versions) > 0 {
versions[0].IsLatest = true
}
return versions, nil
}
// mergeXLV2Versions will merge all versions, typically from different disks
// that have at least quorum entries in all metas.
// Quorum must be the minimum number of matching metadata files.
// Quorum should be > 1 and <= len(versions).
// If strict is set to false, entries whose headers are not identical but
// still match per matchesNotStrict (same version ID and type) are
// counted towards the same version.
func mergeXLV2Versions(quorum int, strict bool, requestedVersions int, versions ...[]xlMetaV2ShallowVersion) (merged []xlMetaV2ShallowVersion) {
if quorum <= 0 {
quorum = 1
}
if len(versions) < quorum || len(versions) == 0 {
return nil
}
if len(versions) == 1 {
return versions[0]
}
if quorum == 1 {
// No need for non-strict checks if quorum is 1.
strict = true
}
// Shallow copy input
versions = append(make([][]xlMetaV2ShallowVersion, 0, len(versions)), versions...)
var nVersions int // captures all non-free versions
// Our result
merged = make([]xlMetaV2ShallowVersion, 0, len(versions[0]))
tops := make([]xlMetaV2ShallowVersion, len(versions))
for {
// Step 1 create slice with all top versions.
tops = tops[:0]
var topSig xlMetaV2VersionHeader
consistent := true // Are all signatures consistent (shortcut)
for _, vers := range versions {
if len(vers) == 0 {
consistent = false
continue
}
ver := vers[0]
if len(tops) == 0 {
consistent = true
topSig = ver.header
} else {
consistent = consistent && ver.header == topSig
}
tops = append(tops, vers[0])
}
// Check if done...
if len(tops) < quorum {
// We couldn't gather enough for quorum
break
}
var latest xlMetaV2ShallowVersion
if consistent {
// All had the same signature, easy.
latest = tops[0]
merged = append(merged, latest)
// Calculate latest 'n' non-free versions.
if !latest.header.FreeVersion() {
nVersions++
}
} else {
// Find latest.
var latestCount int
for i, ver := range tops {
if ver.header == latest.header {
latestCount++
continue
}
if i == 0 || ver.header.sortsBefore(latest.header) {
switch {
case i == 0 || latestCount == 0:
latestCount = 1
case !strict && ver.header.matchesNotStrict(latest.header):
latestCount++
default:
latestCount = 1
}
latest = ver
continue
}
// Mismatch, but older.
if latestCount > 0 && !strict && ver.header.matchesNotStrict(latest.header) {
latestCount++
continue
}
if latestCount > 0 && ver.header.VersionID == latest.header.VersionID {
// Version IDs match, but otherwise unable to resolve.
// We are either strict, or don't have enough information to match.
// Switch to a pure counting algo.
x := make(map[xlMetaV2VersionHeader]int, len(tops))
for _, a := range tops {
if a.header.VersionID != ver.header.VersionID {
continue
}
if !strict {
a.header.Signature = [4]byte{}
}
x[a.header]++
}
latestCount = 0
for k, v := range x {
if v < latestCount {
continue
}
if v == latestCount && latest.header.sortsBefore(k) {
// Tiebreak, use sort.
continue
}
for _, a := range tops {
hdr := a.header
if !strict {
hdr.Signature = [4]byte{}
}
if hdr == k {
latest = a
}
}
latestCount = v
}
break
}
}
if latestCount >= quorum {
merged = append(merged, latest)
// Calculate latest 'n' non-free versions.
if !latest.header.FreeVersion() {
nVersions++
}
}
}
// Remove from all streams up until latest modtime or if selected.
for i, vers := range versions {
for _, ver := range vers {
// Truncate later modtimes, not selected.
if ver.header.ModTime > latest.header.ModTime {
versions[i] = versions[i][1:]
continue
}
// Truncate matches
if ver.header == latest.header {
versions[i] = versions[i][1:]
continue
}
// Truncate non-empty version and type matches
if latest.header.VersionID == ver.header.VersionID {
versions[i] = versions[i][1:]
continue
}
// Skip versions with version id we already emitted.
for _, mergedV := range merged {
if ver.header.VersionID == mergedV.header.VersionID {
versions[i] = versions[i][1:]
continue
}
}
// Keep top entry (and remaining)...
break
}
}
if requestedVersions > 0 && requestedVersions == nVersions {
merged = append(merged, versions[0]...)
break
}
}
// Sanity check. Enable if duplicates show up.
if false {
found := make(map[[16]byte]struct{})
for _, ver := range merged {
if _, ok := found[ver.header.VersionID]; ok {
panic("found dupe")
}
found[ver.header.VersionID] = struct{}{}
}
}
return merged
}
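// xlMetaBuf is a raw xlMetaV2 metadata payload, as returned by
// isIndexedMetaV2. Its methods decode version headers lazily, so the
// buffer can be inspected without unmarshalling every version up front.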
type xlMetaBuf []byte
// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
// for consumption across callers.
func (x xlMetaBuf) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
var uv uuid.UUID
if versionID != "" && versionID != nullVersionID {
uv, err = uuid.Parse(versionID)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
return fi, errFileVersionNotFound
}
}
versions, headerV, metaV, buf, err := decodeXLHeaders(x)
if err != nil {
return fi, err
}
var header xlMetaV2VersionHeader
var succModTime int64
isLatest := true
nonFreeVersions := versions
found := false
err = decodeVersions(buf, versions, func(idx int, hdr, meta []byte) error {
if _, err := header.unmarshalV(headerV, hdr); err != nil {
return err
}
// skip listing free-version unless explicitly requested via versionID
if header.FreeVersion() {
nonFreeVersions--
if header.VersionID != uv {
return nil
}
}
if found {
return nil
}
// We need a specific version, skip...
if versionID != "" && uv != header.VersionID {
isLatest = false
succModTime = header.ModTime
return nil
}
// We found what we need.
found = true
var version xlMetaV2Version
if _, err := version.unmarshalV(metaV, meta); err != nil {
return err
}
if fi, err = version.ToFileInfo(volume, path); err != nil {
return err
}
fi.IsLatest = isLatest
if succModTime != 0 {
fi.SuccessorModTime = time.Unix(0, succModTime)
}
return nil
})
if !found {
if versionID == "" {
return FileInfo{}, errFileNotFound
}
return FileInfo{}, errFileVersionNotFound
}
fi.NumVersions = nonFreeVersions
return fi, err
}
// ListVersions lists current versions and current deleted
// versions. It returns an error for unexpected entries.
func (x xlMetaBuf) ListVersions(volume, path string) ([]FileInfo, error) {
vers, _, metaV, buf, err := decodeXLHeaders(x)
if err != nil {
return nil, err
}
var succModTime time.Time
isLatest := true
dst := make([]FileInfo, 0, vers)
var xl xlMetaV2Version
err = decodeVersions(buf, vers, func(idx int, hdr, meta []byte) error {
if _, err := xl.unmarshalV(metaV, meta); err != nil {
return err
}
if !xl.Valid() {
return errFileCorrupt
}
fi, err := xl.ToFileInfo(volume, path)
if err != nil {
return err
}
fi.IsLatest = isLatest
fi.SuccessorModTime = succModTime
fi.NumVersions = vers
isLatest = false
succModTime = xl.getModTime()
dst = append(dst, fi)
return nil
})
return dst, err
}
// IsLatestDeleteMarker returns true if latest version is a deletemarker or there are no versions.
// If any error occurs false is returned.
func (x xlMetaBuf) IsLatestDeleteMarker() bool {
vers, headerV, _, buf, err := decodeXLHeaders(x)
if err != nil {
return false
}
if vers == 0 {
return true
}
isDeleteMarker := false
_ = decodeVersions(buf, vers, func(idx int, hdr, _ []byte) error {
var xl xlMetaV2VersionHeader
if _, err := xl.unmarshalV(headerV, hdr); err != nil {
return errDoneForNow
}
isDeleteMarker = xl.Type == DeleteType
return errDoneForNow
})
return isDeleteMarker
}
// AllHidden returns true if there are no versions that would show up in a listing (i.e. all are free markers).
// Optionally also return early if top is a delete marker.
func (x xlMetaBuf) AllHidden(topDeleteMarker bool) bool {
vers, headerV, _, buf, err := decodeXLHeaders(x)
if err != nil {
return false
}
if vers == 0 {
return true
}
hidden := true
var xl xlMetaV2VersionHeader
_ = decodeVersions(buf, vers, func(idx int, hdr, _ []byte) error {
if _, err := xl.unmarshalV(headerV, hdr); err != nil {
return errDoneForNow
}
if topDeleteMarker && idx == 0 && xl.Type == DeleteType {
hidden = true
return errDoneForNow
}
if !xl.FreeVersion() {
hidden = false
return errDoneForNow
}
// Check next version
return nil
})
return hidden
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package deadlineconn
import (
"bufio"
"io"
"net"
"sync"
"testing"
"time"
)
// Test deadlineconn handles read timeout properly by reading two messages beyond deadline.
func TestBuffConnReadTimeout(t *testing.T) {
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("unable to create listener. %v", err)
}
defer l.Close()
serverAddr := l.Addr().String()
tcpListener, ok := l.(*net.TCPListener)
if !ok {
t.Fatalf("failed to assert to net.TCPListener")
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
tcpConn, terr := tcpListener.AcceptTCP()
if terr != nil {
t.Errorf("failed to accept new connection. %v", terr)
return
}
deadlineconn := New(tcpConn)
deadlineconn.WithReadDeadline(time.Second)
deadlineconn.WithWriteDeadline(time.Second)
defer deadlineconn.Close()
// Read a line
b := make([]byte, 12)
_, terr = deadlineconn.Read(b)
if terr != nil {
t.Errorf("failed to read from client. %v", terr)
return
}
received := string(b)
if received != "message one\n" {
t.Errorf(`server: expected: "message one\n", got: %v`, received)
return
}
// Wait for more than read timeout to simulate processing.
time.Sleep(3 * time.Second)
_, terr = deadlineconn.Read(b)
if terr != nil {
t.Errorf("failed to read from client. %v", terr)
return
}
received = string(b)
if received != "message two\n" {
t.Errorf(`server: expected: "message two\n", got: %v`, received)
return
}
// Send a response.
_, terr = io.WriteString(deadlineconn, "messages received\n")
if terr != nil {
t.Errorf("failed to write to client. %v", terr)
return
}
}()
c, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatalf("unable to connect to server. %v", err)
}
defer c.Close()
_, err = io.WriteString(c, "message one\n")
if err != nil {
t.Fatalf("failed to write to server. %v", err)
}
_, err = io.WriteString(c, "message two\n")
if err != nil {
t.Fatalf("failed to write to server. %v", err)
}
received, err := bufio.NewReader(c).ReadString('\n')
if err != nil {
t.Fatalf("failed to read from server. %v", err)
}
if received != "messages received\n" {
t.Fatalf(`client: expected: "messages received\n", got: %v`, received)
}
wg.Wait()
}
<file_sep># OPA Quickstart Guide [](https://slack.minio.io)
OPA is a lightweight general-purpose policy engine that can be co-located with the MinIO server. In this document we describe how to use the OPA HTTP API to authorize requests. It can be used with any type of credentials (STS-based such as OpenID or LDAP, regular IAM users, or service accounts).
OPA is enabled through MinIO's Access Management Plugin feature.
## Get started
### 1. Start OPA in a container
```sh
podman run -it \
--name opa \
--publish 8181:8181 \
docker.io/openpolicyagent/opa:0.40.0-rootless \
run --server \
--log-format=json-pretty \
--log-level=debug \
--set=decision_logs.console=true
```
### 2. Create a sample OPA Policy
In another terminal, create a policy that allows the root user full access and denies `PutObject` for all other users:
```sh
cat > example.rego <<EOF
package httpapi.authz
import input
default allow = false
# Allow the root user to perform any action.
allow {
input.owner == true
}
# All other users may do anything other than call PutObject
allow {
input.action != "s3:PutObject"
input.owner == false
}
EOF
```
### 3. Load the policy into OPA

Load the policy via OPA's REST API:

```sh
curl -X PUT --data-binary @example.rego \
localhost:8181/v1/policies/putobject
```
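
If you want to sanity-check the rule before wiring it into MinIO, you can query it directly through OPA's data API. This is a minimal sketch; the hand-written `input` document below simply mirrors the fields the policy above references (`owner`, `action`):

```sh
# Non-owner attempting PutObject - expect {"result": false}
curl -s -X POST localhost:8181/v1/data/httpapi/authz/allow \
  -H 'Content-Type: application/json' \
  -d '{"input": {"owner": false, "action": "s3:PutObject"}}'

# Root user (owner) - expect {"result": true}
curl -s -X POST localhost:8181/v1/data/httpapi/authz/allow \
  -H 'Content-Type: application/json' \
  -d '{"input": {"owner": true, "action": "s3:PutObject"}}'
```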
### 4. Set up MinIO with OPA
Set the `MINIO_POLICY_PLUGIN_URL` as the endpoint that MinIO should send authorization requests to. Then start the server.
```sh
export MINIO_POLICY_PLUGIN_URL=http://localhost:8181/v1/data/httpapi/authz/allow
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>3
minio server /mnt/data
```
### 5. Test with a regular IAM user
Ensure that `mc` is installed and configured with the above server under the alias `myminio`.
```sh
# 1. Create a bucket and a user, and upload a file. These operations will succeed.
mc mb myminio/test
mc admin user add myminio foo foobar123
mc cp /etc/issue myminio/test/
# 2. Now access the server as user `foo`. These operations will also succeed.
export MC_HOST_foo=http://foo:foobar123@localhost:9000
mc ls foo/test
mc cat foo/test/issue
# 3. Attempt to upload an object as user `foo` - this will fail with a permissions error.
mc cp /etc/issue myminio/test/issue2
```
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package dsync
import (
"math/rand"
"time"
)
func backoffWait(min, unit, cap time.Duration) func(*rand.Rand, uint) time.Duration {
if unit > time.Hour {
// Protect against integer overflow
panic("unit cannot exceed one hour")
}
return func(r *rand.Rand, attempt uint) time.Duration {
sleep := min
sleep += unit * time.Duration(attempt)
if sleep > cap {
sleep = cap
}
sleep -= time.Duration(r.Float64() * float64(sleep))
return sleep
}
}
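// Illustrative use of backoffWait in a retry loop (the values below are
// arbitrary, not defaults): each attempt sleeps a random duration in
// (0, min+unit*attempt], never exceeding cap.
//
//	wait := backoffWait(10*time.Millisecond, 50*time.Millisecond, 2*time.Second)
//	r := rand.New(rand.NewSource(time.Now().UnixNano()))
//	for attempt := uint(0); attempt < 5; attempt++ {
//		time.Sleep(wait(r, attempt))
//	}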
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"runtime"
"strconv"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
"github.com/minio/pkg/wildcard"
"github.com/minio/pkg/workers"
)
// keyrotate:
// apiVersion: v1
// bucket: BUCKET
// prefix: PREFIX
// encryption:
// type: sse-s3 # valid values are sse-s3 and sse-kms
// key: <new-kms-key> # valid only for sse-kms
// context: <new-kms-key-context> # valid only for sse-kms
// # optional flags based filtering criteria
// # for all objects
// flags:
// filter:
// newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
// olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
// createdAfter: "date" # match objects created after "date"
// createdBefore: "date" # match objects created before "date"
// tags:
// - key: "name"
// value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
// metadata:
// - key: "content-type"
// value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
// kmskey: "key-id" # match objects with KMS key-id (applicable only for sse-kms)
// notify:
// endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
// token: "Bearer x<PASSWORD>" # optional authentication token for the notification endpoint
// retry:
// attempts: 10 # number of retries for the job before giving up
// delay: "500ms" # least amount of delay between each retry
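//
// A minimal, illustrative job definition using the fields documented above
// (placeholder bucket/prefix values, not defaults):
//
//	keyrotate:
//	  apiVersion: v1
//	  bucket: mybucket
//	  prefix: myprefix
//	  encryption:
//	    type: sse-s3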
//go:generate msgp -file $GOFILE -unexported
// BatchKeyRotateKV is a datatype that holds key and values for filtering of objects
// used by metadata filter as well as tags based filtering.
type BatchKeyRotateKV struct {
Key string `yaml:"key" json:"key"`
Value string `yaml:"value" json:"value"`
}
// Validate returns an error if key is empty
func (kv BatchKeyRotateKV) Validate() error {
if kv.Key == "" {
return errInvalidArgument
}
return nil
}
// Empty indicates if kv is not set
func (kv BatchKeyRotateKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
}
// Match matches input kv with kv, value will be wildcard matched depending on the user input
func (kv BatchKeyRotateKV) Match(ikv BatchKeyRotateKV) bool {
if kv.Empty() {
return true
}
if strings.EqualFold(kv.Key, ikv.Key) {
return wildcard.Match(kv.Value, ikv.Value)
}
return false
}
// BatchKeyRotateRetry datatype represents total retry attempts and delay between each retries.
type BatchKeyRotateRetry struct {
Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
Delay time.Duration `yaml:"delay" json:"delay"` // delay between each retries
}
// Validate validates input replicate retries.
func (r BatchKeyRotateRetry) Validate() error {
if r.Attempts < 0 {
return errInvalidArgument
}
if r.Delay < 0 {
return errInvalidArgument
}
return nil
}
// BatchKeyRotationType defines key rotation type
type BatchKeyRotationType string
const (
sses3 BatchKeyRotationType = "sse-s3"
ssekms BatchKeyRotationType = "sse-kms"
)
// BatchJobKeyRotateEncryption defines key rotation encryption options passed
type BatchJobKeyRotateEncryption struct {
Type BatchKeyRotationType `yaml:"type" json:"type"`
Key string `yaml:"key" json:"key"`
Context string `yaml:"context" json:"context"`
kmsContext kms.Context `msg:"-"`
}
// Validate validates input key rotation encryption options.
func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type != sses3 && e.Type != ssekms {
return errInvalidArgument
}
spaces := strings.HasPrefix(e.Key, " ") || strings.HasSuffix(e.Key, " ")
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}
if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
b, err := base64.StdEncoding.DecodeString(e.Context)
if err != nil {
return err
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(b, &ctx); err != nil {
return err
}
}
e.kmsContext = kms.Context{}
for k, v := range ctx {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
return err
}
}
return nil
}
// BatchKeyRotateFilter holds all the filters currently supported for batch replication
type BatchKeyRotateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchKeyRotateKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchKeyRotateKV `yaml:"metadata,omitempty" json:"metadata"`
KMSKeyID string `yaml:"kmskeyid" json:"kmskey"`
}
// BatchKeyRotateNotification success or failure notification endpoint for each job attempts
type BatchKeyRotateNotification struct {
Endpoint string `yaml:"endpoint" json:"endpoint"`
Token string `yaml:"token" json:"token"`
}
// BatchJobKeyRotateFlags various configurations for replication job definition currently includes
// - filter
// - notify
// - retry
type BatchJobKeyRotateFlags struct {
Filter BatchKeyRotateFilter `yaml:"filter" json:"filter"`
Notify BatchKeyRotateNotification `yaml:"notify" json:"notify"`
Retry BatchKeyRotateRetry `yaml:"retry" json:"retry"`
}
// BatchJobKeyRotateV1 v1 of batch key rotation job
type BatchJobKeyRotateV1 struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Flags BatchJobKeyRotateFlags `yaml:"flags" json:"flags"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Encryption BatchJobKeyRotateEncryption `yaml:"encryption" json:"encryption"`
}
// Notify notifies notification endpoint if configured regarding job failure or success.
func (r BatchJobKeyRotateV1) Notify(ctx context.Context, body io.Reader) error {
if r.Flags.Notify.Endpoint == "" {
return nil
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)
if err != nil {
return err
}
if r.Flags.Notify.Token != "" {
req.Header.Set("Authorization", r.Flags.Notify.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport}
resp, err := clnt.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return nil
}
// KeyRotate rotates encryption key of an object
func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, objInfo ObjectInfo) error {
srcBucket := r.Bucket
srcObject := objInfo.Name
if objInfo.DeleteMarker || !objInfo.VersionPurgeStatus.Empty() {
return nil
}
sseKMS := crypto.S3KMS.IsEncrypted(objInfo.UserDefined)
sseS3 := crypto.S3.IsEncrypted(objInfo.UserDefined)
if !sseKMS && !sseS3 { // object is neither sse-s3 nor sse-kms encrypted, disallowed
return errInvalidEncryptionParameters
}
if sseKMS && r.Encryption.Type == sses3 { // previously encrypted with sse-kms, now sse-s3 disallowed
return errInvalidEncryptionParameters
}
versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
lock := api.NewNSLock(r.Bucket, objInfo.Name)
lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return err
}
ctx = lkctx.Context()
defer lock.Unlock(lkctx)
opts := ObjectOptions{
VersionID: objInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
NoLock: true,
}
obj, err := api.GetObjectInfo(ctx, r.Bucket, objInfo.Name, opts)
if err != nil {
return err
}
oi := obj.Clone()
var (
newKeyID string
newKeyContext kms.Context
)
encMetadata := make(map[string]string)
for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
encMetadata[k] = v
}
}
if (sseKMS || sseS3) && r.Encryption.Type == ssekms {
if err = r.Encryption.Validate(); err != nil {
return err
}
newKeyID = strings.TrimPrefix(r.Encryption.Key, crypto.ARNPrefix)
newKeyContext = r.Encryption.kmsContext
}
if err = rotateKey(ctx, []byte{}, newKeyID, []byte{}, r.Bucket, oi.Name, encMetadata, newKeyContext); err != nil {
return err
}
// Since we are rotating the keys, make sure to update the metadata.
oi.metadataOnly = true
oi.keyRotation = true
for k, v := range encMetadata {
oi.UserDefined[k] = v
}
if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
VersionID: oi.VersionID,
}, ObjectOptions{
VersionID: oi.VersionID,
NoLock: true,
}); err != nil {
return err
}
return nil
}
const (
batchKeyRotationName = "batch-rotate.bin"
batchKeyRotationFormat = 1
batchKeyRotateVersionV1 = 1
batchKeyRotateVersion = batchKeyRotateVersionV1
batchKeyRotateAPIVersion = "v1"
batchKeyRotateJobDefaultRetries = 3
batchKeyRotateJobDefaultRetryDelay = 250 * time.Millisecond
)
// Start the batch key rotation job; resumes a pending job, if any, via "job.ID"
func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
delay := job.KeyRotate.Flags.Retry.Delay
if delay == 0 {
delay = batchKeyRotateJobDefaultRetryDelay
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
skip := func(info FileInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
// skip all objects that are newer than specified older duration
return false
}
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
// skip all objects that are older than specified newer duration
return false
}
if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
// skip all objects that are created before the specified time.
return false
}
if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
// skip all objects that are created after the specified time.
return false
}
if len(r.Flags.Filter.Tags) > 0 {
// Only parse object tags if tags filter is specified.
tagMap := map[string]string{}
tagStr := info.Metadata[xhttp.AmzObjectTagging]
if len(tagStr) != 0 {
t, err := tags.ParseObjectTags(tagStr)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range r.Flags.Filter.Tags {
for t, v := range tagMap {
if kv.Match(BatchKeyRotateKV{Key: t, Value: v}) {
return true
}
}
}
// None of the provided tag filters matched, skip the object
return false
}
if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchKeyRotateKV{Key: k, Value: v}) {
return true
}
}
}
// None of the provided metadata filters matched, skip the object.
return false
}
if r.Flags.Filter.KMSKeyID != "" {
if v, ok := info.Metadata[xhttp.AmzServerSideEncryptionKmsID]; ok && strings.TrimPrefix(v, crypto.ARNPrefix) != r.Flags.Filter.KMSKeyID {
return false
}
}
return true
}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)
results := make(chan ObjectInfo, 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, ObjectOptions{
WalkMarker: lastObject,
WalkFilter: skip,
}); err != nil {
cancel()
// Do not need to retry if we can't list objects on source.
return err
}
for result := range results {
result := result
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // object is neither sse-s3 nor sse-kms encrypted, skip it
continue
}
wk.Take()
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchKeyRotationMetricObject, job.ID, attempts, result)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(err)
logger.LogIf(ctx, err)
success = false
} else {
stopFn(nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk every 10 seconds.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success {
break
}
}
}()
}
wk.Wait()
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
buf, _ := json.Marshal(ri)
if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
if ri.Failed {
ri.ObjectsFailed = 0
ri.Bucket = ""
ri.Object = ""
ri.Objects = 0
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
}
return nil
}
//msgp:ignore batchKeyRotationJobError
type batchKeyRotationJobError struct {
Code string
Description string
HTTPStatusCode int
}
func (e batchKeyRotationJobError) Error() string {
return e.Description
}
// Validate validates the job definition input
func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}
if r.APIVersion != batchKeyRotateAPIVersion {
return errInvalidArgument
}
if r.Bucket == "" {
return errInvalidArgument
}
if _, err := o.GetBucketInfo(ctx, r.Bucket, BucketOptions{}); err != nil {
if isErrBucketNotFound(err) {
return batchKeyRotationJobError{
Code: "NoSuchSourceBucket",
Description: "The specified source bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
if GlobalKMS == nil {
return errKMSNotConfigured
}
if err := r.Encryption.Validate(); err != nil {
return err
}
for _, tag := range r.Flags.Filter.Tags {
if err := tag.Validate(); err != nil {
return err
}
}
for _, meta := range r.Flags.Filter.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kms
import (
"context"
"github.com/minio/kes-go"
)
// IdentityManager is the generic interface that handles KMS identity operations
type IdentityManager interface {
// DescribeIdentity describes an identity by returning its metadata.
// e.g. which policy is currently assigned and whether its an admin identity.
DescribeIdentity(ctx context.Context, identity string) (*kes.IdentityInfo, error)
// DescribeSelfIdentity describes the identity issuing the request.
// It infers the identity from the TLS client certificate used to authenticate.
// It returns the identity and policy information for the client identity.
DescribeSelfIdentity(ctx context.Context) (*kes.IdentityInfo, *kes.Policy, error)
// DeleteIdentity deletes an identity from KMS.
// The client certificate that corresponds to the identity is no longer authorized to perform any API operations.
// The admin identity cannot be deleted.
DeleteIdentity(ctx context.Context, identity string) error
// ListIdentities list all identity metadata that match the specified pattern.
// In particular, the pattern * lists all identity metadata.
ListIdentities(ctx context.Context, pattern string) (*kes.IdentityIterator, error)
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package once
import (
"context"
"sync"
"sync/atomic"
)
// Inspired from Golang sync.Once but it is only marked
// initialized when the provided function returns nil.
// Init represents the structure.
type Init struct {
done uint32
m sync.Mutex
}
// Do is similar to sync.Once.Do - makes one successful
// call to the function. ie, it invokes the function
// if it is not successful yet.
func (l *Init) Do(f func() error) error {
if atomic.LoadUint32(&l.done) == 0 {
return l.do(f)
}
return nil
}
func (l *Init) do(f func() error) error {
l.m.Lock()
defer l.m.Unlock()
if atomic.LoadUint32(&l.done) == 0 {
if err := f(); err != nil {
return err
}
// Mark as done only when f() is successful
atomic.StoreUint32(&l.done, 1)
}
return nil
}
// DoWithContext is similar to Do except that it accepts a context as an argument to be passed.
func (l *Init) DoWithContext(ctx context.Context, f func(context.Context) error) error {
if atomic.LoadUint32(&l.done) == 0 {
return l.doWithContext(ctx, f)
}
return nil
}
func (l *Init) doWithContext(ctx context.Context, f func(context.Context) error) error {
l.m.Lock()
defer l.m.Unlock()
if atomic.LoadUint32(&l.done) == 0 {
if err := f(ctx); err != nil {
return err
}
// Mark as done only when f() is successful
atomic.StoreUint32(&l.done, 1)
}
return nil
}
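// Illustrative usage of Init from a caller's perspective ("connect" is a
// hypothetical func() error, not part of this package):
//
//	var initOnce once.Init
//	if err := initOnce.Do(connect); err != nil {
//		// connect failed; a later Do(connect) will invoke it again
//	}
//	_ = initOnce.Do(connect) // no-op once a previous call has succeeded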
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
crand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"flag"
"fmt"
"io"
"os"
"strings"
"time"
)
var (
keyHex = flag.String("key", "", "decryption key")
privKeyPath = flag.String("private-key", "support_private.pem", "private key")
stdin = flag.Bool("stdin", false, "expect 'mc support inspect' json output from stdin")
export = flag.Bool("export", false, "export xl.meta")
djson = flag.Bool("djson", false, "expect djson format for xl.meta")
genkey = flag.Bool("genkey", false, "generate key pair")
)
func main() {
flag.Parse()
if *genkey {
generateKeys()
os.Exit(0)
}
var privateKey []byte
if *keyHex == "" {
if b, err := os.ReadFile(*privKeyPath); err == nil {
privateKey = b
fmt.Println("Using private key from", *privKeyPath)
}
// Prompt for decryption key if no --key or --private-key are provided
if len(privateKey) == 0 {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Enter Decryption Key: ")
text, _ := reader.ReadString('\n')
// convert CRLF to LF
*keyHex = strings.ReplaceAll(text, "\n", "")
*keyHex = strings.TrimSpace(*keyHex)
}
}
var inputFileName, outputFileName string
// Parse parameters
switch {
case *stdin:
// Parse 'mc support inspect --json' output
input := struct {
File string `json:"file"`
Key string `json:"key"`
}{}
got, err := io.ReadAll(os.Stdin)
if err != nil {
fatalErr(err)
}
fatalErr(json.Unmarshal(got, &input))
inputFileName = input.File
*keyHex = input.Key
case len(flag.Args()) == 1:
inputFileName = flag.Args()[0]
default:
flag.Usage()
fatalIf(true, "Only 1 file can be decrypted")
os.Exit(1)
}
// Calculate the output file name
switch {
case strings.HasSuffix(inputFileName, ".enc"):
outputFileName = strings.TrimSuffix(inputFileName, ".enc") + ".zip"
case strings.HasSuffix(inputFileName, ".zip"):
outputFileName = strings.TrimSuffix(inputFileName, ".zip") + ".decrypted.zip"
}
// Backup any already existing output file
_, err := os.Stat(outputFileName)
if err == nil {
err := os.Rename(outputFileName, outputFileName+"."+time.Now().Format("20060102150405"))
if err != nil {
fatalErr(err)
}
}
// Open the input and create the output file
input, err := os.Open(inputFileName)
fatalErr(err)
defer input.Close()
output, err := os.Create(outputFileName)
fatalErr(err)
// Decrypt the inspect data
switch {
case *keyHex != "":
err = extractInspectV1(*keyHex, input, output)
case len(privateKey) != 0:
err = extractInspectV2(privateKey, input, output)
}
output.Close()
if err != nil {
os.Remove(outputFileName)
fatalErr(err)
}
fmt.Println("output written to", outputFileName)
// Export xl.meta to stdout
if *export {
fatalErr(inspectToExportType(outputFileName, *djson))
os.Remove(outputFileName)
}
}
func generateKeys() {
privatekey, err := rsa.GenerateKey(crand.Reader, 2048)
if err != nil {
fmt.Printf("error generating key: %s n", err)
os.Exit(1)
}
// dump private key to file
privateKeyBytes := x509.MarshalPKCS1PrivateKey(privatekey)
privateKeyBlock := &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: privateKeyBytes,
}
privatePem, err := os.Create("support_private.pem")
if err != nil {
fmt.Printf("error when create private.pem: %s n", err)
os.Exit(1)
}
err = pem.Encode(privatePem, privateKeyBlock)
if err != nil {
fmt.Printf("error when encode private pem: %s n", err)
os.Exit(1)
}
// dump public key to file
publicKeyBytes := x509.MarshalPKCS1PublicKey(&privatekey.PublicKey)
if err != nil {
fmt.Printf("error when dumping publickey: %s n", err)
os.Exit(1)
}
publicKeyBlock := &pem.Block{
Type: "PUBLIC KEY",
Bytes: publicKeyBytes,
}
publicPem, err := os.Create("support_public.pem")
if err != nil {
fmt.Printf("error when create public.pem: %s n", err)
os.Exit(1)
}
err = pem.Encode(publicPem, publicKeyBlock)
if err != nil {
fmt.Printf("error when encode public pem: %s n", err)
os.Exit(1)
}
}
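// Illustrative invocations of this tool (the binary name "inspect" and the
// file names are assumptions for the example, not fixed by this file):
//
//	inspect -genkey                                  # writes support_private.pem and support_public.pem
//	inspect -key <hex-key> inspect-data.enc          # decrypt with a hex decryption key
//	inspect -private-key support_private.pem inspect-data.enc
//	mc support inspect --json <target> | inspect -stdin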
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"bytes"
"fmt"
"io"
"net/http"
"time"
)
// ResponseRecorder - is a wrapper to trap the http response
// status code and to record the response body
type ResponseRecorder struct {
http.ResponseWriter
StatusCode int
// Log body of 4xx or 5xx responses
LogErrBody bool
// Log body of all responses
LogAllBody bool
TimeToFirstByte time.Duration
StartTime time.Time
// number of bytes written
bytesWritten int
// number of bytes of response headers written
headerBytesWritten int
// Internal recording buffer
headers bytes.Buffer
body bytes.Buffer
// Indicate if headers are written in the log
headersLogged bool
}
// NewResponseRecorder - returns a wrapped response writer to trap
// http status codes for auditing purposes.
func NewResponseRecorder(w http.ResponseWriter) *ResponseRecorder {
return &ResponseRecorder{
ResponseWriter: w,
StatusCode: http.StatusOK,
StartTime: time.Now().UTC(),
}
}
func (lrw *ResponseRecorder) Write(p []byte) (int, error) {
if !lrw.headersLogged {
// We assume the response code to be '200 OK' when WriteHeader() is not called,
// that way following Golang HTTP response behavior.
lrw.WriteHeader(http.StatusOK)
}
n, err := lrw.ResponseWriter.Write(p)
lrw.bytesWritten += n
if lrw.TimeToFirstByte == 0 {
lrw.TimeToFirstByte = time.Now().UTC().Sub(lrw.StartTime)
}
gzipped := lrw.Header().Get("Content-Encoding") == "gzip"
if !gzipped && ((lrw.LogErrBody && lrw.StatusCode >= http.StatusBadRequest) || lrw.LogAllBody) {
// Always logging error responses.
lrw.body.Write(p)
}
return n, err
}
// Write the headers into the given buffer
func (lrw *ResponseRecorder) writeHeaders(w io.Writer, statusCode int, headers http.Header) {
n, _ := fmt.Fprintf(w, "%d %s\n", statusCode, http.StatusText(statusCode))
lrw.headerBytesWritten += n
for k, v := range headers {
n, _ := fmt.Fprintf(w, "%s: %s\n", k, v[0])
lrw.headerBytesWritten += n
}
}
// blobBody returns a dummy body placeholder for blob (binary stream)
var blobBody = []byte("<BLOB>")
// gzippedBody returns a dummy body placeholder for gzipped content
var gzippedBody = []byte("<GZIP>")
// Body - Return response body.
func (lrw *ResponseRecorder) Body() []byte {
if lrw.Header().Get("Content-Encoding") == "gzip" {
// ... otherwise we return the <GZIP> place holder
return gzippedBody
}
// If there was an error response or body logging is enabled
// then we return the body contents
if (lrw.LogErrBody && lrw.StatusCode >= http.StatusBadRequest) || lrw.LogAllBody {
return lrw.body.Bytes()
}
// ... otherwise we return the <BLOB> place holder
return blobBody
}
// WriteHeader - writes http status code
func (lrw *ResponseRecorder) WriteHeader(code int) {
if !lrw.headersLogged {
lrw.StatusCode = code
lrw.writeHeaders(&lrw.headers, code, lrw.ResponseWriter.Header())
lrw.headersLogged = true
lrw.ResponseWriter.WriteHeader(code)
}
}
// Flush - Calls the underlying Flush.
func (lrw *ResponseRecorder) Flush() {
lrw.ResponseWriter.(http.Flusher).Flush()
}
// Size - returns the number of bytes written
func (lrw *ResponseRecorder) Size() int {
return lrw.bytesWritten
}
// HeaderSize - returns the number of bytes of response headers written
func (lrw *ResponseRecorder) HeaderSize() int {
return lrw.headerBytesWritten
}
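// Illustrative usage (the handler and the final log line are hypothetical,
// not part of this package):
//
//	rec := NewResponseRecorder(w)
//	rec.LogErrBody = true
//	handler.ServeHTTP(rec, r)
//	fmt.Printf("status=%d bytes=%d ttfb=%s\n", rec.StatusCode, rec.Size(), rec.TimeToFirstByte)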
<file_sep>#!/bin/bash -e
set -E
set -o pipefail
set -x
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$(mktemp -d)"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio() {
start_port=$1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=<PASSWORD>
unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
unset MINIO_CI_CD
unset CI
args=()
for i in $(seq 1 4); do
args+=("http://localhost:$((start_port + i))${WORK_DIR}/mnt/disk$i/ ")
done
for i in $(seq 1 4); do
"${MINIO[@]}" --address ":$((start_port + i))" ${args[@]} 2>&1 >"${WORK_DIR}/server$i.log" &
done
# Wait until all nodes return 403
for i in $(seq 1 4); do
while [ "$(curl -m 1 -s -o /dev/null -w "%{http_code}" http://localhost:$((start_port + i)))" -ne "403" ]; do
echo -n "."
sleep 1
done
done
}
# Prepare fake disks with losetup
function prepare_block_devices() {
set -e
mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
sudo modprobe loop
for i in 1 2 3 4; do
dd if=/dev/zero of=${WORK_DIR}/disks/img.${i} bs=1M count=2000
device=$(sudo losetup --find --show ${WORK_DIR}/disks/img.${i})
sudo mkfs.ext4 -F ${device}
mkdir -p ${WORK_DIR}/mnt/disk${i}/
sudo mount ${device} ${WORK_DIR}/mnt/disk${i}/
sudo chown "$(id -u):$(id -g)" ${device} ${WORK_DIR}/mnt/disk${i}/
done
set +e
}
# Start a distributed MinIO setup, unmount one disk and check if it is formatted
function main() {
start_port=$(shuf -i 10000-65000 -n 1)
start_minio ${start_port}
# Unmount the disk; after the unmount, the device id of
# /tmp/xxx/mnt/disk4 will be the same as '/' and it
# will be detected as a root disk
while [ "$u" != "0" ]; do
sudo umount ${WORK_DIR}/mnt/disk4/
u=$?
sleep 1
done
# Wait until MinIO self heal kicks in
sleep 60
if [ -f ${WORK_DIR}/mnt/disk4/.minio.sys/format.json ]; then
echo "A root disk is formatted unexpectedely"
cat "${WORK_DIR}/server4.log"
exit -1
fi
}
function cleanup() {
pkill minio
sudo umount ${WORK_DIR}/mnt/disk{1..3}/
sudo rm /dev/minio-loopdisk*
rm -rf "$WORK_DIR"
}
(prepare_block_devices)
(main "$@")
rv=$?
cleanup
exit "$rv"
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"fmt"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
)
// xlMetaInlineData is serialized data in [string][]byte pairs.
type xlMetaInlineData []byte
// xlMetaInlineDataVer indicates the version of the inline data structure.
const xlMetaInlineDataVer = 1
// versionOK returns whether the version is ok.
func (x xlMetaInlineData) versionOK() bool {
if len(x) == 0 {
return true
}
return x[0] > 0 && x[0] <= xlMetaInlineDataVer
}
// afterVersion returns the payload after the version, if any.
func (x xlMetaInlineData) afterVersion() []byte {
if len(x) == 0 {
return x
}
return x[1:]
}
// find the data with key s.
// Returns nil if not found or an error occurs.
func (x xlMetaInlineData) find(key string) []byte {
if len(x) == 0 || !x.versionOK() {
return nil
}
sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
if err != nil || sz == 0 {
return nil
}
for i := uint32(0); i < sz; i++ {
var found []byte
found, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil || sz == 0 {
return nil
}
if string(found) == key {
val, _, _ := msgp.ReadBytesZC(buf)
return val
}
// Skip it
_, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return nil
}
}
return nil
}
// validate checks if the data is valid.
// It does not check integrity of the stored data.
func (x xlMetaInlineData) validate() error {
if len(x) == 0 {
return nil
}
if !x.versionOK() {
return fmt.Errorf("xlMetaInlineData: unknown version 0x%x", x[0])
}
sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
if err != nil {
return fmt.Errorf("xlMetaInlineData: %w", err)
}
for i := uint32(0); i < sz; i++ {
var key []byte
key, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
return fmt.Errorf("xlMetaInlineData: %w", err)
}
if len(key) == 0 {
return fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
}
_, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return fmt.Errorf("xlMetaInlineData: %w", err)
}
}
return nil
}
// repair will copy all seemingly valid data entries from a corrupted set.
// This does not ensure that data is correct, but will allow all operations to complete.
func (x *xlMetaInlineData) repair() {
data := *x
if len(data) == 0 {
return
}
if !data.versionOK() {
*x = nil
return
}
sz, buf, err := msgp.ReadMapHeaderBytes(data.afterVersion())
if err != nil {
*x = nil
return
}
// Collect all seemingly valid key/value pairs.
keys := make([][]byte, 0, sz)
vals := make([][]byte, 0, sz)
for i := uint32(0); i < sz; i++ {
var key, val []byte
key, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
break
}
if len(key) == 0 {
break
}
val, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
break
}
keys = append(keys, key)
vals = append(vals, val)
}
x.serialize(-1, keys, vals)
}
// list returns the keys currently stored in the inline data.
// It does not check integrity of the stored data.
func (x xlMetaInlineData) list() ([]string, error) {
if len(x) == 0 {
return nil, nil
}
if !x.versionOK() {
return nil, errors.New("xlMetaInlineData: unknown version")
}
sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
if err != nil {
return nil, err
}
keys := make([]string, 0, sz)
for i := uint32(0); i < sz; i++ {
var key []byte
key, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
return keys, err
}
if len(key) == 0 {
return keys, fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
}
keys = append(keys, string(key))
// Skip data...
_, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return keys, err
}
}
return keys, nil
}
// serialize will serialize the provided keys and values.
// The function will panic if keys/value slices aren't of equal length.
// Payload size can give an indication of expected payload size.
// If plSize is <= 0 it will be calculated.
func (x *xlMetaInlineData) serialize(plSize int, keys [][]byte, vals [][]byte) {
if len(keys) != len(vals) {
panic(fmt.Errorf("xlMetaInlineData.serialize: keys/value number mismatch"))
}
if len(keys) == 0 {
*x = nil
return
}
if plSize <= 0 {
plSize = 1 + msgp.MapHeaderSize
for i := range keys {
plSize += len(keys[i]) + len(vals[i]) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
}
}
payload := make([]byte, 1, plSize)
payload[0] = xlMetaInlineDataVer
payload = msgp.AppendMapHeader(payload, uint32(len(keys)))
for i := range keys {
payload = msgp.AppendStringFromBytes(payload, keys[i])
payload = msgp.AppendBytes(payload, vals[i])
}
*x = payload
}
// entries returns the number of entries in the data.
func (x xlMetaInlineData) entries() int {
if len(x) == 0 || !x.versionOK() {
return 0
}
sz, _, _ := msgp.ReadMapHeaderBytes(x.afterVersion())
return int(sz)
}
// replace will add or replace a key/value pair.
func (x *xlMetaInlineData) replace(key string, value []byte) {
in := x.afterVersion()
sz, buf, _ := msgp.ReadMapHeaderBytes(in)
keys := make([][]byte, 0, sz+1)
vals := make([][]byte, 0, sz+1)
// Version plus header...
plSize := 1 + msgp.MapHeaderSize
replaced := false
for i := uint32(0); i < sz; i++ {
var found, foundVal []byte
var err error
found, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
break
}
foundVal, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
break
}
plSize += len(found) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
keys = append(keys, found)
if string(found) == key {
vals = append(vals, value)
plSize += len(value)
replaced = true
} else {
vals = append(vals, foundVal)
plSize += len(foundVal)
}
}
// Add one more.
if !replaced {
keys = append(keys, []byte(key))
vals = append(vals, value)
plSize += len(key) + len(value) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
}
// Reserialize...
x.serialize(plSize, keys, vals)
}
// rename will rename a key.
// Returns whether the key was found.
func (x *xlMetaInlineData) rename(oldKey, newKey string) bool {
in := x.afterVersion()
sz, buf, _ := msgp.ReadMapHeaderBytes(in)
keys := make([][]byte, 0, sz)
vals := make([][]byte, 0, sz)
// Version plus header...
plSize := 1 + msgp.MapHeaderSize
found := false
for i := uint32(0); i < sz; i++ {
var foundKey, foundVal []byte
var err error
foundKey, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
break
}
foundVal, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
break
}
plSize += len(foundVal) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
vals = append(vals, foundVal)
if string(foundKey) != oldKey {
keys = append(keys, foundKey)
plSize += len(foundKey)
} else {
keys = append(keys, []byte(newKey))
plSize += len(newKey)
found = true
}
}
// If not found, just return.
if !found {
return false
}
// Reserialize...
x.serialize(plSize, keys, vals)
return true
}
// remove will remove one or more keys.
// Returns true if any key was found.
func (x *xlMetaInlineData) remove(keys ...string) bool {
in := x.afterVersion()
sz, buf, _ := msgp.ReadMapHeaderBytes(in)
newKeys := make([][]byte, 0, sz)
newVals := make([][]byte, 0, sz)
var removeKey func(s []byte) bool
// Copy if big number of compares...
if len(keys) > 5 && sz > 5 {
mKeys := make(map[string]struct{}, len(keys))
for _, key := range keys {
mKeys[key] = struct{}{}
}
removeKey = func(s []byte) bool {
_, ok := mKeys[string(s)]
return ok
}
} else {
removeKey = func(s []byte) bool {
for _, key := range keys {
if key == string(s) {
return true
}
}
return false
}
}
// Version plus header...
plSize := 1 + msgp.MapHeaderSize
found := false
for i := uint32(0); i < sz; i++ {
var foundKey, foundVal []byte
var err error
foundKey, buf, err = msgp.ReadMapKeyZC(buf)
if err != nil {
break
}
foundVal, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
break
}
if !removeKey(foundKey) {
plSize += msgp.StringPrefixSize + msgp.ArrayHeaderSize + len(foundKey) + len(foundVal)
newKeys = append(newKeys, foundKey)
newVals = append(newVals, foundVal)
} else {
found = true
}
}
// If not found, just return.
if !found {
return false
}
// If none left...
if len(newKeys) == 0 {
*x = nil
return true
}
// Reserialize...
x.serialize(plSize, newKeys, newVals)
return true
}
// xlMetaV2TrimData will trim any data from the metadata without unmarshalling it.
// If any error occurs the unmodified data is returned.
func xlMetaV2TrimData(buf []byte) []byte {
metaBuf, min, maj, err := checkXL2V1(buf)
if err != nil {
return buf
}
if maj == 1 && min < 1 {
// First version to carry data.
return buf
}
// Skip header
_, metaBuf, err = msgp.ReadBytesZC(metaBuf)
if err != nil {
logger.LogIf(GlobalContext, err)
return buf
}
// Skip CRC
if maj > 1 || min >= 2 {
_, metaBuf, err = msgp.ReadUint32Bytes(metaBuf)
logger.LogIf(GlobalContext, err)
}
// = input - current pos
ends := len(buf) - len(metaBuf)
if ends > len(buf) {
return buf
}
return buf[:ends]
}
<file_sep># MinIO FIPS Builds
MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/http"
"testing"
)
func BenchmarkURLQueryForm(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil {
b.Fatal(err)
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark starts here. Reset the benchmark timer.
b.ResetTimer()
if err := req.ParseForm(); err != nil {
b.Fatal(err)
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
req.Form.Get("uploadId")
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// BenchmarkURLQuery - benchmark URL memory allocations
func BenchmarkURLQuery(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil {
b.Fatal(err)
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
req.URL.Query().Get("uploadId")
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"net/http"
"github.com/klauspost/compress/gzhttp"
"github.com/klauspost/compress/gzip"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
)
const (
kmsPathPrefix = minioReservedBucketPath + "/kms"
kmsAPIVersion = "v1"
kmsAPIVersionPrefix = SlashSeparator + kmsAPIVersion
)
type kmsAPIHandlers struct{}
// registerKMSRouter - Registers KMS APIs
func registerKMSRouter(router *mux.Router) {
kmsAPI := kmsAPIHandlers{}
kmsRouter := router.PathPrefix(kmsPathPrefix).Subrouter()
KMSVersions := []string{
kmsAPIVersionPrefix,
}
gz, err := gzhttp.NewWrapper(gzhttp.MinSize(1000), gzhttp.CompressionLevel(gzip.BestSpeed))
if err != nil {
// Static params, so this is very unlikely.
logger.Fatal(err, "Unable to initialize server")
}
for _, version := range KMSVersions {
// KMS Status APIs
kmsRouter.Methods(http.MethodGet).Path(version + "/status").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSStatusHandler)))
kmsRouter.Methods(http.MethodGet).Path(version + "/metrics").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSMetricsHandler)))
kmsRouter.Methods(http.MethodGet).Path(version + "/apis").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSAPIsHandler)))
kmsRouter.Methods(http.MethodGet).Path(version + "/version").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSVersionHandler)))
// KMS Key APIs
kmsRouter.Methods(http.MethodPost).Path(version+"/key/create").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSCreateKeyHandler))).Queries("key-id", "{key-id:.*}")
kmsRouter.Methods(http.MethodPost).Path(version+"/key/import").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSImportKeyHandler))).Queries("key-id", "{key-id:.*}")
kmsRouter.Methods(http.MethodDelete).Path(version+"/key/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeleteKeyHandler))).Queries("key-id", "{key-id:.*}")
kmsRouter.Methods(http.MethodGet).Path(version+"/key/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListKeysHandler))).Queries("pattern", "{pattern:.*}")
kmsRouter.Methods(http.MethodGet).Path(version + "/key/status").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSKeyStatusHandler)))
// KMS Policy APIs
kmsRouter.Methods(http.MethodPost).Path(version+"/policy/set").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSSetPolicyHandler))).Queries("policy", "{policy:.*}")
kmsRouter.Methods(http.MethodPost).Path(version+"/policy/assign").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSAssignPolicyHandler))).Queries("policy", "{policy:.*}")
kmsRouter.Methods(http.MethodGet).Path(version+"/policy/describe").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribePolicyHandler))).Queries("policy", "{policy:.*}")
kmsRouter.Methods(http.MethodGet).Path(version+"/policy/get").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSGetPolicyHandler))).Queries("policy", "{policy:.*}")
kmsRouter.Methods(http.MethodDelete).Path(version+"/policy/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeletePolicyHandler))).Queries("policy", "{policy:.*}")
kmsRouter.Methods(http.MethodGet).Path(version+"/policy/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListPoliciesHandler))).Queries("pattern", "{pattern:.*}")
// KMS Identity APIs
kmsRouter.Methods(http.MethodGet).Path(version+"/identity/describe").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribeIdentityHandler))).Queries("identity", "{identity:.*}")
kmsRouter.Methods(http.MethodGet).Path(version + "/identity/describe-self").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribeSelfIdentityHandler)))
kmsRouter.Methods(http.MethodDelete).Path(version+"/identity/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeleteIdentityHandler))).Queries("identity", "{identity:.*}")
kmsRouter.Methods(http.MethodGet).Path(version+"/identity/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListIdentitiesHandler))).Queries("pattern", "{pattern:.*}")
}
// If none of the routes match add default error handler routes
kmsRouter.NotFoundHandler = httpTraceAll(errorResponseHandler)
kmsRouter.MethodNotAllowedHandler = httpTraceAll(methodNotAllowedHandler("KMS"))
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package scanner
import (
"fmt"
"strconv"
"time"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/env"
)
// Compression environment variables
const (
Speed = "speed"
EnvSpeed = "MINIO_SCANNER_SPEED"
// All below are deprecated in October 2022 and
// replaced them with a single speed parameter
Delay = "delay"
MaxWait = "max_wait"
Cycle = "cycle"
EnvDelay = "MINIO_SCANNER_DELAY"
EnvCycle = "MINIO_SCANNER_CYCLE"
EnvDelayLegacy = "MINIO_CRAWLER_DELAY"
EnvMaxWait = "MINIO_SCANNER_MAX_WAIT"
EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"
)
// Config represents the heal settings.
type Config struct {
// Delay is the sleep multiplier.
Delay float64 `json:"delay"`
// MaxWait is maximum wait time between operations
MaxWait time.Duration
// Cycle is the time.Duration between each scanner cycles
Cycle time.Duration
}
// DefaultKVS - default KV config for heal settings
var DefaultKVS = config.KVS{
config.KV{
Key: Speed,
Value: "default",
},
// Deprecated Oct 2022
config.KV{
Key: Delay,
Value: "",
Deprecated: true,
},
// Deprecated Oct 2022
config.KV{
Key: MaxWait,
Value: "",
Deprecated: true,
},
// Deprecated Oct 2022
config.KV{
Key: Cycle,
Value: "",
Deprecated: true,
},
}
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.ScannerSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
// Stick to loading deprecated config/env if they are already set
if kvs.Get(Delay) != "" && kvs.Get(MaxWait) != "" && kvs.Get(Cycle) != "" {
return lookupDeprecatedScannerConfig(kvs)
}
switch speed := env.Get(EnvSpeed, kvs.GetWithDefault(Speed, DefaultKVS)); speed {
case "fastest":
cfg.Delay, cfg.MaxWait, cfg.Cycle = 0, 0, 0
case "fast":
cfg.Delay, cfg.MaxWait, cfg.Cycle = 1, 100*time.Millisecond, time.Minute
case "default":
cfg.Delay, cfg.MaxWait, cfg.Cycle = 2, 5*time.Second, time.Minute
case "slow":
cfg.Delay, cfg.MaxWait, cfg.Cycle = 10, 15*time.Second, time.Minute
case "slowest":
cfg.Delay, cfg.MaxWait, cfg.Cycle = 100, 15*time.Second, 30*time.Minute
default:
return cfg, fmt.Errorf("unknown '%s' value", speed)
}
return
}
func lookupDeprecatedScannerConfig(kvs config.KVS) (cfg Config, err error) {
delay := env.Get(EnvDelayLegacy, "")
if delay == "" {
delay = env.Get(EnvDelay, kvs.GetWithDefault(Delay, DefaultKVS))
}
cfg.Delay, err = strconv.ParseFloat(delay, 64)
if err != nil {
return cfg, err
}
maxWait := env.Get(EnvMaxWaitLegacy, "")
if maxWait == "" {
maxWait = env.Get(EnvMaxWait, kvs.GetWithDefault(MaxWait, DefaultKVS))
}
cfg.MaxWait, err = time.ParseDuration(maxWait)
if err != nil {
return cfg, err
}
cfg.Cycle, err = time.ParseDuration(env.Get(EnvCycle, kvs.GetWithDefault(Cycle, DefaultKVS)))
if err != nil {
return cfg, err
}
return cfg, nil
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
)
//go:generate msgp -file=$GOFILE
// replicatedTargetInfo struct represents replication info on a target
type replicatedTargetInfo struct {
Arn string
Size int64
Duration time.Duration
ReplicationAction replicationAction // full or metadata only
OpType replication.Type // whether incoming replication, existing object, healing etc..
ReplicationStatus replication.StatusType
PrevReplicationStatus replication.StatusType
VersionPurgeStatus VersionPurgeStatusType
ResyncTimestamp string
ReplicationResynced bool // true only if resync attempted for this target
}
// Empty returns true for a target if arn is empty
func (rt replicatedTargetInfo) Empty() bool {
return rt.Arn == ""
}
type replicatedInfos struct {
ReplicationTimeStamp time.Time
Targets []replicatedTargetInfo
}
func (ri replicatedInfos) CompletedSize() (sz int64) {
for _, t := range ri.Targets {
if t.Empty() {
continue
}
if t.ReplicationStatus == replication.Completed && t.PrevReplicationStatus != replication.Completed {
sz += t.Size
}
}
return sz
}
// ReplicationResynced returns true if resync was attempted on any of the targets for the
// object version
func (ri replicatedInfos) ReplicationResynced() bool {
for _, t := range ri.Targets {
if t.Empty() || !t.ReplicationResynced {
continue
}
return true
}
return false
}
func (ri replicatedInfos) ReplicationStatusInternal() string {
b := new(bytes.Buffer)
for _, t := range ri.Targets {
if t.Empty() {
continue
}
fmt.Fprintf(b, "%s=%s;", t.Arn, t.ReplicationStatus.String())
}
return b.String()
}
func (ri replicatedInfos) ReplicationStatus() replication.StatusType {
if len(ri.Targets) == 0 {
return replication.StatusType("")
}
completed := 0
for _, v := range ri.Targets {
switch v.ReplicationStatus {
case replication.Failed:
return replication.Failed
case replication.Completed:
completed++
}
}
if completed == len(ri.Targets) {
return replication.Completed
}
return replication.Pending
}
func (ri replicatedInfos) VersionPurgeStatus() VersionPurgeStatusType {
if len(ri.Targets) == 0 {
return VersionPurgeStatusType("")
}
completed := 0
for _, v := range ri.Targets {
switch v.VersionPurgeStatus {
case Failed:
return Failed
case Complete:
completed++
}
}
if completed == len(ri.Targets) {
return Complete
}
return Pending
}
func (ri replicatedInfos) VersionPurgeStatusInternal() string {
b := new(bytes.Buffer)
for _, t := range ri.Targets {
if t.Empty() {
continue
}
if t.VersionPurgeStatus.Empty() {
continue
}
fmt.Fprintf(b, "%s=%s;", t.Arn, t.VersionPurgeStatus)
}
return b.String()
}
func (ri replicatedInfos) Action() replicationAction {
for _, t := range ri.Targets {
if t.Empty() {
continue
}
// rely on replication action from target that actually performed replication now.
if t.PrevReplicationStatus != replication.Completed {
return t.ReplicationAction
}
}
return replicateNone
}
var replStatusRegex = regexp.MustCompile(`([^=].*?)=([^,].*?);`)
// TargetReplicationStatus - returns replication status of a target
func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.StatusType) {
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return
}
type replicateTargetDecision struct {
Replicate bool // Replicate to this target
Synchronous bool // Synchronous replication configured.
Arn string // ARN of replication target
ID string
}
func (t *replicateTargetDecision) String() string {
return fmt.Sprintf("%t;%t;%s;%s", t.Replicate, t.Synchronous, t.Arn, t.ID)
}
func newReplicateTargetDecision(arn string, replicate bool, sync bool) replicateTargetDecision {
d := replicateTargetDecision{
Replicate: replicate,
Synchronous: sync,
Arn: arn,
}
return d
}
// ReplicateDecision represents replication decision for each target
type ReplicateDecision struct {
targetsMap map[string]replicateTargetDecision
}
// ReplicateAny returns true if at least one target qualifies for replication
func (d *ReplicateDecision) ReplicateAny() bool {
for _, t := range d.targetsMap {
if t.Replicate {
return true
}
}
return false
}
// Synchronous returns true if at least one target qualifies for synchronous replication
func (d *ReplicateDecision) Synchronous() bool {
for _, t := range d.targetsMap {
if t.Synchronous {
return true
}
}
return false
}
func (d *ReplicateDecision) String() string {
b := new(bytes.Buffer)
for key, value := range d.targetsMap {
fmt.Fprintf(b, "%s=%s,", key, value.String())
}
return strings.TrimSuffix(b.String(), ",")
}
// Set updates ReplicateDecision with target's replication decision
func (d *ReplicateDecision) Set(t replicateTargetDecision) {
if d.targetsMap == nil {
d.targetsMap = make(map[string]replicateTargetDecision)
}
d.targetsMap[t.Arn] = t
}
// PendingStatus returns a stringified representation of internal replication status with all targets marked as `PENDING`
func (d *ReplicateDecision) PendingStatus() string {
b := new(bytes.Buffer)
for _, k := range d.targetsMap {
if k.Replicate {
fmt.Fprintf(b, "%s=%s;", k.Arn, replication.Pending.String())
}
}
return b.String()
}
// ResyncDecision is a struct representing a map with target's individual resync decisions
type ResyncDecision struct {
targets map[string]ResyncTargetDecision
}
// Empty returns true if no targets with resync decision present
func (r *ResyncDecision) Empty() bool {
return r.targets == nil
}
func (r *ResyncDecision) mustResync() bool {
for _, v := range r.targets {
if v.Replicate {
return true
}
}
return false
}
func (r *ResyncDecision) mustResyncTarget(tgtArn string) bool {
if r.targets == nil {
return false
}
v, ok := r.targets[tgtArn]
if ok && v.Replicate {
return true
}
return false
}
// ResyncTargetDecision is struct that represents resync decision for this target
type ResyncTargetDecision struct {
Replicate bool
ResetID string
ResetBeforeDate time.Time
}
var errInvalidReplicateDecisionFormat = fmt.Errorf("ReplicateDecision has invalid format")
// parseReplicateDecision parses a comma-delimited string of "targetARN=stringified replicateTargetDecision"
// key-value pairs into a ReplicateDecision struct
func parseReplicateDecision(s string) (r ReplicateDecision, err error) {
r = ReplicateDecision{
targetsMap: make(map[string]replicateTargetDecision),
}
if len(s) == 0 {
return
}
pairs := strings.Split(s, ",")
for _, p := range pairs {
slc := strings.Split(p, "=")
if len(slc) != 2 {
return r, errInvalidReplicateDecisionFormat
}
tgtStr := strings.TrimPrefix(slc[1], "\"")
tgtStr = strings.TrimSuffix(tgtStr, "\"")
tgt := strings.Split(tgtStr, ";")
if len(tgt) != 4 {
return r, errInvalidReplicateDecisionFormat
}
var replicate, sync bool
var err error
replicate, err = strconv.ParseBool(tgt[0])
if err != nil {
return r, err
}
sync, err = strconv.ParseBool(tgt[1])
if err != nil {
return r, err
}
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: replicate, Synchronous: sync, Arn: tgt[2], ID: tgt[3]}
}
return
}
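// For illustration (ARN and ID values are placeholders): the stringified form produced by
// ReplicateDecision.String(), e.g.
// "arn:minio:replication::id1:bucket=true;false;arn:minio:replication::id1:bucket;id1"
// parses back into a single target decision with Replicate=true and Synchronous=false.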
// ReplicationState represents internal replication state
type ReplicationState struct {
ReplicaTimeStamp time.Time // timestamp when last replica update was received
	ReplicaStatus             replication.StatusType            // replica status of the object version
DeleteMarker bool // represents DeleteMarker replication state
ReplicationTimeStamp time.Time // timestamp when last replication activity happened
ReplicationStatusInternal string // stringified representation of all replication activity
	// VersionPurgeStatusInternal is internally in the format "arn1=PENDING;arn2=COMPLETED;"
VersionPurgeStatusInternal string // stringified representation of all version purge statuses
ReplicateDecisionStr string // stringified representation of replication decision for each target
Targets map[string]replication.StatusType // map of ARN->replication status for ongoing replication activity
PurgeTargets map[string]VersionPurgeStatusType // map of ARN->VersionPurgeStatus for all the targets
ResetStatusesMap map[string]string // map of ARN-> stringified reset id and timestamp for all the targets
}
// Equal returns true if replication state is identical for version purge statuses and (replica)tion statuses.
func (rs *ReplicationState) Equal(o ReplicationState) bool {
return rs.ReplicaStatus == o.ReplicaStatus &&
rs.ReplicaTimeStamp.Equal(o.ReplicaTimeStamp) &&
rs.ReplicationTimeStamp.Equal(o.ReplicationTimeStamp) &&
rs.ReplicationStatusInternal == o.ReplicationStatusInternal &&
rs.VersionPurgeStatusInternal == o.VersionPurgeStatusInternal
}
// CompositeReplicationStatus returns overall replication status for the object version being replicated.
func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusType) {
switch {
case rs.ReplicationStatusInternal != "":
switch replication.StatusType(rs.ReplicationStatusInternal) {
case replication.Pending, replication.Completed, replication.Failed, replication.Replica: // for backward compatibility
return replication.StatusType(rs.ReplicationStatusInternal)
default:
replStatus := getCompositeReplicationStatus(rs.Targets)
// return REPLICA status if replica received timestamp is later than replication timestamp
// provided object replication completed for all targets.
if !rs.ReplicaTimeStamp.Equal(timeSentinel) && replStatus == replication.Completed && rs.ReplicaTimeStamp.After(rs.ReplicationTimeStamp) {
return rs.ReplicaStatus
}
return replStatus
}
case !rs.ReplicaStatus.Empty():
return rs.ReplicaStatus
default:
return
}
}
// CompositeVersionPurgeStatus returns overall replication purge status for the permanent delete being replicated.
func (rs *ReplicationState) CompositeVersionPurgeStatus() VersionPurgeStatusType {
switch VersionPurgeStatusType(rs.VersionPurgeStatusInternal) {
case Pending, Complete, Failed: // for backward compatibility
return VersionPurgeStatusType(rs.VersionPurgeStatusInternal)
default:
return getCompositeVersionPurgeStatus(rs.PurgeTargets)
}
}
// targetState returns a replicatedTargetInfo struct initialized with the previous state of replication for the given target ARN
func (rs *ReplicationState) targetState(arn string) (r replicatedTargetInfo) {
return replicatedTargetInfo{
Arn: arn,
PrevReplicationStatus: rs.Targets[arn],
VersionPurgeStatus: rs.PurgeTargets[arn],
ResyncTimestamp: rs.ResetStatusesMap[arn],
}
}
// getReplicationState returns replication state using target replicated info for the targets
func getReplicationState(rinfos replicatedInfos, prevState ReplicationState, vID string) ReplicationState {
rs := ReplicationState{
ReplicateDecisionStr: prevState.ReplicateDecisionStr,
ResetStatusesMap: prevState.ResetStatusesMap,
ReplicaTimeStamp: prevState.ReplicaTimeStamp,
ReplicaStatus: prevState.ReplicaStatus,
}
var replStatuses, vpurgeStatuses string
replStatuses = rinfos.ReplicationStatusInternal()
rs.Targets = replicationStatusesMap(replStatuses)
rs.ReplicationStatusInternal = replStatuses
rs.ReplicationTimeStamp = rinfos.ReplicationTimeStamp
vpurgeStatuses = rinfos.VersionPurgeStatusInternal()
rs.VersionPurgeStatusInternal = vpurgeStatuses
rs.PurgeTargets = versionPurgeStatusesMap(vpurgeStatuses)
for _, rinfo := range rinfos.Targets {
if rinfo.ResyncTimestamp != "" {
rs.ResetStatusesMap[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
}
}
return rs
}
// constructs a replication status map from string representation
func replicationStatusesMap(s string) map[string]replication.StatusType {
targets := make(map[string]replication.StatusType)
repStatMatches := replStatusRegex.FindAllStringSubmatch(s, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
continue
}
status := replication.StatusType(repStatMatch[2])
targets[repStatMatch[1]] = status
}
return targets
}
// constructs a version purge status map from string representation
func versionPurgeStatusesMap(s string) map[string]VersionPurgeStatusType {
targets := make(map[string]VersionPurgeStatusType)
purgeStatusMatches := replStatusRegex.FindAllStringSubmatch(s, -1)
for _, purgeStatusMatch := range purgeStatusMatches {
if len(purgeStatusMatch) != 3 {
continue
}
targets[purgeStatusMatch[1]] = VersionPurgeStatusType(purgeStatusMatch[2])
}
return targets
}
// return the overall replication status for all the targets
func getCompositeReplicationStatus(m map[string]replication.StatusType) replication.StatusType {
if len(m) == 0 {
return replication.StatusType("")
}
completed := 0
for _, v := range m {
switch v {
case replication.Failed:
return replication.Failed
case replication.Completed:
completed++
}
}
if completed == len(m) {
return replication.Completed
}
return replication.Pending
}
// return the overall version purge status for all the targets
func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) VersionPurgeStatusType {
if len(m) == 0 {
return VersionPurgeStatusType("")
}
completed := 0
for _, v := range m {
switch v {
case Failed:
return Failed
case Complete:
completed++
}
}
if completed == len(m) {
return Complete
}
return Pending
}
// getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo
func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) ReplicateObjectInfo {
oi := objInfo.Clone()
if rcfg.Config != nil && rcfg.Config.RoleArn != "" {
// For backward compatibility of objects pending/failed replication.
// Save replication related statuses in the new internal representation for
// compatible behavior.
if !oi.ReplicationStatus.Empty() {
oi.ReplicationStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.ReplicationStatus)
}
if !oi.VersionPurgeStatus.Empty() {
oi.VersionPurgeStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.VersionPurgeStatus)
}
for k, v := range oi.UserDefined {
if strings.EqualFold(k, ReservedMetadataPrefixLower+ReplicationReset) {
delete(oi.UserDefined, k)
oi.UserDefined[targetResetHeader(rcfg.Config.RoleArn)] = v
}
}
}
var dsc ReplicateDecision
var tgtStatuses map[string]replication.StatusType
var purgeStatuses map[string]VersionPurgeStatusType
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
dsc = checkReplicateDelete(GlobalContext, oi.Bucket, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: oi.Name,
VersionID: oi.VersionID,
},
}, oi, ObjectOptions{
Versioned: globalBucketVersioningSys.PrefixEnabled(oi.Bucket, oi.Name),
VersionSuspended: globalBucketVersioningSys.PrefixSuspended(oi.Bucket, oi.Name),
}, nil)
} else {
dsc = mustReplicate(GlobalContext, oi.Bucket, oi.Name, getMustReplicateOptions(ObjectInfo{
UserDefined: oi.UserDefined,
}, replication.HealReplicationType, ObjectOptions{}))
}
tgtStatuses = replicationStatusesMap(oi.ReplicationStatusInternal)
purgeStatuses = versionPurgeStatusesMap(oi.VersionPurgeStatusInternal)
existingObjResync := rcfg.Resync(GlobalContext, oi, &dsc, tgtStatuses)
tm, _ := time.Parse(time.RFC3339Nano, oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp])
return ReplicateObjectInfo{
ObjectInfo: oi,
OpType: replication.HealReplicationType,
Dsc: dsc,
ExistingObjResync: existingObjResync,
TargetStatuses: tgtStatuses,
TargetPurgeStatuses: purgeStatuses,
ReplicationTimestamp: tm,
}
}
// vID here represents the versionID client specified in request - need to distinguish between delete marker and delete marker deletion
func (o *ObjectInfo) getReplicationState(dsc string, vID string, heal bool) ReplicationState {
rs := ReplicationState{
ReplicationStatusInternal: o.ReplicationStatusInternal,
VersionPurgeStatusInternal: o.VersionPurgeStatusInternal,
ReplicateDecisionStr: dsc,
Targets: make(map[string]replication.StatusType),
PurgeTargets: make(map[string]VersionPurgeStatusType),
ResetStatusesMap: make(map[string]string),
}
rs.Targets = replicationStatusesMap(o.ReplicationStatusInternal)
rs.PurgeTargets = versionPurgeStatusesMap(o.VersionPurgeStatusInternal)
for k, v := range o.UserDefined {
if strings.HasPrefix(k, ReservedMetadataPrefixLower+ReplicationReset) {
arn := strings.TrimPrefix(k, fmt.Sprintf("%s-", ReservedMetadataPrefixLower+ReplicationReset))
rs.ResetStatusesMap[arn] = v
}
}
return rs
}
// ReplicationState returns replication state using other internal replication metadata in ObjectToDelete
func (o *ObjectToDelete) ReplicationState() ReplicationState {
r := ReplicationState{
ReplicationStatusInternal: o.DeleteMarkerReplicationStatus,
VersionPurgeStatusInternal: o.VersionPurgeStatuses,
ReplicateDecisionStr: o.ReplicateDecisionStr,
}
r.Targets = replicationStatusesMap(o.DeleteMarkerReplicationStatus)
r.PurgeTargets = versionPurgeStatusesMap(o.VersionPurgeStatuses)
return r
}
// VersionPurgeStatus returns a composite version purge status across targets
func (d *DeletedObject) VersionPurgeStatus() VersionPurgeStatusType {
return d.ReplicationState.CompositeVersionPurgeStatus()
}
// DeleteMarkerReplicationStatus return composite replication status of delete marker across targets
func (d *DeletedObject) DeleteMarkerReplicationStatus() replication.StatusType {
return d.ReplicationState.CompositeReplicationStatus()
}
// ResyncTargetsInfo holds a slice of targets with resync info per target
type ResyncTargetsInfo struct {
Targets []ResyncTarget `json:"target,omitempty"`
}
// ResyncTarget is a struct representing the Target reset ID where target is identified by its Arn
type ResyncTarget struct {
Arn string `json:"arn"`
ResetID string `json:"resetid"`
StartTime time.Time `json:"startTime"`
EndTime time.Time `json:"endTime"`
// Status of resync operation
ResyncStatus string `json:"resyncStatus,omitempty"`
// Completed size in bytes
ReplicatedSize int64 `json:"completedReplicationSize"`
// Failed size in bytes
FailedSize int64 `json:"failedReplicationSize"`
// Total number of failed operations
FailedCount int64 `json:"failedReplicationCount"`
	// Total number of objects replicated
ReplicatedCount int64 `json:"replicationCount"`
// Last bucket/object replicated.
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
}
// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
type VersionPurgeStatusType string
const (
// Pending - versioned delete replication is pending.
Pending VersionPurgeStatusType = "PENDING"
// Complete - versioned delete replication is now complete, erase version on disk.
Complete VersionPurgeStatusType = "COMPLETE"
// Failed - versioned delete replication failed.
Failed VersionPurgeStatusType = "FAILED"
)
// Empty returns true if purge status was not set.
func (v VersionPurgeStatusType) Empty() bool {
return string(v) == ""
}
// Pending returns true if the version is pending purge.
func (v VersionPurgeStatusType) Pending() bool {
return v == Pending || v == Failed
}
type replicationResyncer struct {
// map of bucket to their resync status
statusMap map[string]BucketReplicationResyncStatus
workerSize int
resyncCancelCh chan struct{}
workerCh chan struct{}
sync.RWMutex
}
const (
replicationDir = ".replication"
resyncFileName = "resync.bin"
resyncMetaFormat = 1
resyncMetaVersionV1 = 1
resyncMetaVersion = resyncMetaVersionV1
)
type resyncOpts struct {
bucket string
arn string
resyncID string
resyncBefore time.Time
}
// ResyncStatusType status of resync operation
type ResyncStatusType int
const (
// NoResync - no resync in progress
NoResync ResyncStatusType = iota
// ResyncPending - resync pending
ResyncPending
// ResyncCanceled - resync canceled
ResyncCanceled
// ResyncStarted - resync in progress
ResyncStarted
// ResyncCompleted - resync finished
ResyncCompleted
// ResyncFailed - resync failed
ResyncFailed
)
func (rt ResyncStatusType) isValid() bool {
return rt != NoResync
}
func (rt ResyncStatusType) String() string {
switch rt {
case ResyncStarted:
return "Ongoing"
case ResyncCompleted:
return "Completed"
case ResyncFailed:
return "Failed"
case ResyncPending:
return "Pending"
case ResyncCanceled:
return "Canceled"
default:
return ""
}
}
// TargetReplicationResyncStatus status of resync of bucket for a specific target
type TargetReplicationResyncStatus struct {
StartTime time.Time `json:"startTime" msg:"st"`
LastUpdate time.Time `json:"lastUpdated" msg:"lst"`
// Resync ID assigned to this reset
ResyncID string `json:"resyncID" msg:"id"`
// ResyncBeforeDate - resync all objects created prior to this date
ResyncBeforeDate time.Time `json:"resyncBeforeDate" msg:"rdt"`
// Status of resync operation
ResyncStatus ResyncStatusType `json:"resyncStatus" msg:"rst"`
// Failed size in bytes
FailedSize int64 `json:"failedReplicationSize" msg:"fs"`
// Total number of failed operations
FailedCount int64 `json:"failedReplicationCount" msg:"frc"`
// Completed size in bytes
ReplicatedSize int64 `json:"completedReplicationSize" msg:"rs"`
	// Total number of objects replicated
ReplicatedCount int64 `json:"replicationCount" msg:"rrc"`
// Last bucket/object replicated.
Bucket string `json:"-" msg:"bkt"`
Object string `json:"-" msg:"obj"`
}
// BucketReplicationResyncStatus captures current replication resync status
type BucketReplicationResyncStatus struct {
Version int `json:"version" msg:"v"`
// map of remote arn to their resync status for a bucket
TargetsMap map[string]TargetReplicationResyncStatus `json:"resyncMap,omitempty" msg:"brs"`
ID int `json:"id" msg:"id"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
}
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
m = make(map[string]TargetReplicationResyncStatus)
for arn, st := range rs.TargetsMap {
m[arn] = st
}
return
}
func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
return BucketReplicationResyncStatus{
TargetsMap: make(map[string]TargetReplicationResyncStatus),
Version: resyncMetaVersion,
}
}
var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
// parse size from content-range header
func parseSizeFromContentRange(h http.Header) (sz int64, err error) {
cr := h.Get(xhttp.ContentRange)
if cr == "" {
return sz, fmt.Errorf("Content-Range not set")
}
parts := contentRangeRegexp.FindStringSubmatch(cr)
if len(parts) != 4 {
return sz, fmt.Errorf("invalid Content-Range header %s", cr)
}
if parts[3] == "*" {
return -1, nil
}
var usz uint64
usz, err = strconv.ParseUint(parts[3], 10, 64)
if err != nil {
return sz, err
}
return int64(usz), nil
}
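// For illustration: a "Content-Range: bytes 0-99/1000" header yields sz=1000, while
// "Content-Range: bytes 0-99/*" (total size unknown) yields -1.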
func extractReplicateDiffOpts(q url.Values) (opts madmin.ReplDiffOpts) {
opts.Verbose = q.Get("verbose") == "true"
opts.ARN = q.Get("arn")
opts.Prefix = q.Get("prefix")
return
}
const (
replicationMRFDir = bucketMetaPrefix + SlashSeparator + replicationDir + SlashSeparator + "mrf"
mrfMetaFormat = 1
mrfMetaVersionV1 = 1
mrfMetaVersion = mrfMetaVersionV1
)
// MRFReplicateEntry is an MRF (most recent failures) entry to save to disk
type MRFReplicateEntry struct {
Bucket string `json:"bucket" msg:"b"`
Object string `json:"object" msg:"o"`
versionID string `json:"-"`
}
// MRFReplicateEntries has the map of MRF entries to save to disk
type MRFReplicateEntries struct {
Entries map[string]MRFReplicateEntry `json:"entries" msg:"e"`
Version int `json:"version" msg:"v"`
}
// ToMRFEntry returns the relevant info needed by MRF
func (ri ReplicateObjectInfo) ToMRFEntry() MRFReplicateEntry {
return MRFReplicateEntry{
Bucket: ri.Bucket,
Object: ri.Name,
versionID: ri.VersionID,
}
}
func getReplicationStatsPath() string {
return bucketMetaPrefix + SlashSeparator + replicationDir + SlashSeparator + "replication.stats"
}
const (
replStatsMetaFormat = 1
replStatsVersionV1 = 1
replStatsVersion = replStatsVersionV1
replStatsSaveInterval = time.Minute * 5
)
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/gob"
"errors"
"net/http"
"sort"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/sync/errgroup"
)
const (
peerS3Version = "v1" // First implementation
peerS3VersionPrefix = SlashSeparator + peerS3Version
peerS3Prefix = minioReservedBucketPath + "/peer-s3"
peerS3Path = peerS3Prefix + peerS3VersionPrefix
)
const (
peerS3MethodHealth = "/health"
peerS3MethodMakeBucket = "/make-bucket"
peerS3MethodGetBucketInfo = "/get-bucket-info"
peerS3MethodDeleteBucket = "/delete-bucket"
peerS3MethodListBuckets = "/list-buckets"
)
const (
peerS3Bucket = "bucket"
peerS3BucketDeleted = "bucket-deleted"
peerS3BucketForceCreate = "force-create"
peerS3BucketForceDelete = "force-delete"
)
type peerS3Server struct{}
func (s *peerS3Server) writeErrorResponse(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(err.Error()))
}
// IsValid - authenticates the request and verifies that the object layer is initialized.
func (s *peerS3Server) IsValid(w http.ResponseWriter, r *http.Request) bool {
objAPI := newObjectLayerFn()
if objAPI == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return false
}
if err := storageServerRequestValidate(r); err != nil {
s.writeErrorResponse(w, err)
return false
}
return true
}
// HealthHandler - returns success if the request is valid and the server is initialized.
func (s *peerS3Server) HealthHandler(w http.ResponseWriter, r *http.Request) {
s.IsValid(w, r)
}
func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
globalLocalDrivesMu.RUnlock()
quorum := (len(globalLocalDrives) / 2)
buckets = make([]BucketInfo, 0, 32)
healBuckets := map[string]VolInfo{}
// lists all unique buckets across drives.
if err := listAllBuckets(ctx, globalLocalDrives, healBuckets, quorum); err != nil {
return nil, err
}
// include deleted buckets in listBuckets output
deletedBuckets := map[string]VolInfo{}
if opts.Deleted {
// lists all deleted buckets across drives.
if err := listDeletedBuckets(ctx, globalLocalDrives, deletedBuckets, quorum); err != nil {
return nil, err
}
}
for _, v := range healBuckets {
bi := BucketInfo{
Name: v.Name,
Created: v.Created,
}
if vi, ok := deletedBuckets[v.Name]; ok {
bi.Deleted = vi.Created
}
buckets = append(buckets, bi)
}
for _, v := range deletedBuckets {
if _, ok := healBuckets[v.Name]; !ok {
buckets = append(buckets, BucketInfo{
Name: v.Name,
Deleted: v.Created,
})
}
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
return buckets, nil
}
func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
globalLocalDrivesMu.RUnlock()
g := errgroup.WithNErrs(len(globalLocalDrives)).WithConcurrency(32)
bucketsInfo := make([]BucketInfo, len(globalLocalDrives))
	// Fetch the bucket (volume) info from all underlying storage disks.
for index := range globalLocalDrives {
index := index
g.Go(func() error {
if globalLocalDrives[index] == nil {
return errDiskNotFound
}
volInfo, err := globalLocalDrives[index].StatVol(ctx, bucket)
if err != nil {
if opts.Deleted {
dvi, derr := globalLocalDrives[index].StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket))
if derr != nil {
return err
}
bucketsInfo[index] = BucketInfo{Name: bucket, Deleted: dvi.Created}
return nil
}
return err
}
bucketsInfo[index] = BucketInfo{Name: bucket, Created: volInfo.Created}
return nil
}, index)
}
errs := g.Wait()
if err := reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, (len(globalLocalDrives) / 2)); err != nil {
return BucketInfo{}, err
}
var bucketInfo BucketInfo
for i, err := range errs {
if err == nil {
bucketInfo = bucketsInfo[i]
break
}
}
return bucketInfo, nil
}
func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
globalLocalDrivesMu.RUnlock()
g := errgroup.WithNErrs(len(globalLocalDrives)).WithConcurrency(32)
	// Delete the bucket (volume entry) on all underlying storage disks.
for index := range globalLocalDrives {
index := index
g.Go(func() error {
if globalLocalDrives[index] == nil {
return errDiskNotFound
}
return globalLocalDrives[index].DeleteVol(ctx, bucket, opts.Force)
}, index)
}
var recreate bool
errs := g.Wait()
for index, err := range errs {
if errors.Is(err, errVolumeNotEmpty) {
recreate = true
}
if err == nil && recreate {
// ignore any errors
globalLocalDrives[index].MakeVol(ctx, bucket)
}
}
for _, err := range errs {
if err != nil {
return err
}
}
return nil
}
func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions) error {
globalLocalDrivesMu.RLock()
globalLocalDrives := globalLocalDrives
globalLocalDrivesMu.RUnlock()
g := errgroup.WithNErrs(len(globalLocalDrives)).WithConcurrency(32)
// Make a volume entry on all underlying storage disks.
for index := range globalLocalDrives {
index := index
g.Go(func() error {
if globalLocalDrives[index] == nil {
return errDiskNotFound
}
err := globalLocalDrives[index].MakeVol(ctx, bucket)
if opts.ForceCreate && errors.Is(err, errVolumeExists) {
// No need to return error when force create was
// requested.
return nil
}
if err != nil && !errors.Is(err, errVolumeExists) {
logger.LogIf(ctx, err)
}
return err
}, index)
}
errs := g.Wait()
return reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, (len(globalLocalDrives)/2)+1)
}
func (s *peerS3Server) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
bucketDeleted := r.Form.Get(peerS3BucketDeleted) == "true"
buckets, err := listBucketsLocal(r.Context(), BucketOptions{
Deleted: bucketDeleted,
})
if err != nil {
s.writeErrorResponse(w, err)
return
}
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(buckets))
}
// GetBucketInfoHandler implements peer BucketInfo call, returns bucket create date.
func (s *peerS3Server) GetBucketInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
bucket := r.Form.Get(peerS3Bucket)
bucketDeleted := r.Form.Get(peerS3BucketDeleted) == "true"
bucketInfo, err := getBucketInfoLocal(r.Context(), bucket, BucketOptions{
Deleted: bucketDeleted,
})
if err != nil {
s.writeErrorResponse(w, err)
return
}
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(bucketInfo))
}
// DeleteBucketHandler implements peer delete bucket call.
func (s *peerS3Server) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
bucket := r.Form.Get(peerS3Bucket)
if isMinioMetaBucket(bucket) {
s.writeErrorResponse(w, errInvalidArgument)
return
}
forceDelete := r.Form.Get(peerS3BucketForceDelete) == "true"
err := deleteBucketLocal(r.Context(), bucket, DeleteBucketOptions{
Force: forceDelete,
})
if err != nil {
s.writeErrorResponse(w, err)
return
}
}
// MakeBucketHandler implements peer create bucket call.
func (s *peerS3Server) MakeBucketHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
bucket := r.Form.Get(peerS3Bucket)
forceCreate := r.Form.Get(peerS3BucketForceCreate) == "true"
err := makeBucketLocal(r.Context(), bucket, MakeBucketOptions{
ForceCreate: forceCreate,
})
if err != nil {
s.writeErrorResponse(w, err)
return
}
}
// registerPeerS3Handlers - register peer s3 router.
func registerPeerS3Handlers(router *mux.Router) {
server := &peerS3Server{}
subrouter := router.PathPrefix(peerS3Prefix).Subrouter()
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodHealth).HandlerFunc(httpTraceHdrs(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodMakeBucket).HandlerFunc(httpTraceHdrs(server.MakeBucketHandler))
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler))
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodGetBucketInfo).HandlerFunc(httpTraceHdrs(server.GetBucketInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodListBuckets).HandlerFunc(httpTraceHdrs(server.ListBucketsHandler))
}
package cmd
import (
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
)
//go:generate stringer -type=scannerMetric -trimprefix=scannerMetric $GOFILE
type scannerMetric uint8
type scannerMetrics struct {
// All fields must be accessed atomically and aligned.
operations [scannerMetricLast]uint64
latency [scannerMetricLastRealtime]lockedLastMinuteLatency
// actions records actions performed.
actions [lifecycle.ActionCount]uint64
actionsLatency [lifecycle.ActionCount]lockedLastMinuteLatency
// currentPaths contains (string,*currentPathTracker) for each disk processing.
// Alignment not required.
currentPaths sync.Map
cycleInfoMu sync.Mutex
cycleInfo *currentScannerCycle
}
var globalScannerMetrics scannerMetrics
const (
	// START Realtime metrics, that only record
	// last minute latencies and total operation count.
scannerMetricReadMetadata scannerMetric = iota
scannerMetricCheckMissing
scannerMetricSaveUsage
scannerMetricApplyAll
scannerMetricApplyVersion
scannerMetricTierObjSweep
scannerMetricHealCheck
scannerMetricILM
scannerMetricCheckReplication
scannerMetricYield
scannerMetricCleanAbandoned
scannerMetricApplyNonCurrent
// START Trace metrics:
scannerMetricStartTrace
scannerMetricScanObject // Scan object. All operations included.
// END realtime metrics:
scannerMetricLastRealtime
// Trace only metrics:
scannerMetricScanFolder // Scan a folder on disk, recursively.
scannerMetricScanCycle // Full cycle, cluster global
scannerMetricScanBucketDrive // Single bucket on one drive
scannerMetricCompactFolder // Folder compacted.
// Must be last:
scannerMetricLast
)
// log scanner action.
// Use for s > scannerMetricStartTrace
func (p *scannerMetrics) log(s scannerMetric, paths ...string) func(custom map[string]string) {
startTime := time.Now()
return func(custom map[string]string) {
duration := time.Since(startTime)
atomic.AddUint64(&p.operations[s], 1)
if s < scannerMetricLastRealtime {
p.latency[s].add(duration)
}
if s > scannerMetricStartTrace && globalTrace.NumSubscribers(madmin.TraceScanner) > 0 {
globalTrace.Publish(scannerTrace(s, startTime, duration, strings.Join(paths, " "), custom))
}
}
}
// time a scanner action.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) time(s scannerMetric) func() {
startTime := time.Now()
return func() {
duration := time.Since(startTime)
atomic.AddUint64(&p.operations[s], 1)
if s < scannerMetricLastRealtime {
p.latency[s].add(duration)
}
}
}
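// Typical usage (illustrative):
//
//	stop := globalScannerMetrics.time(scannerMetricReadMetadata)
//	// ... perform the metadata read ...
//	stop()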
// timeSize add time and size of a scanner action.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) timeSize(s scannerMetric) func(sz int) {
startTime := time.Now()
return func(sz int) {
duration := time.Since(startTime)
atomic.AddUint64(&p.operations[s], 1)
if s < scannerMetricLastRealtime {
p.latency[s].addSize(duration, int64(sz))
}
}
}
// incTime will increment time on metric s with a specific duration.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) incTime(s scannerMetric, d time.Duration) {
atomic.AddUint64(&p.operations[s], 1)
if s < scannerMetricLastRealtime {
p.latency[s].add(d)
}
}
// timeILM times an ILM action.
// lifecycle.NoneAction is ignored.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) timeILM(a lifecycle.Action) func() {
if a == lifecycle.NoneAction || a >= lifecycle.ActionCount {
return func() {}
}
startTime := time.Now()
return func() {
duration := time.Since(startTime)
atomic.AddUint64(&p.actions[a], 1)
p.actionsLatency[a].add(duration)
}
}
type currentPathTracker struct {
name *unsafe.Pointer // contains atomically accessed *string
}
// currentPathUpdater provides a lightweight update function for keeping track of
// current objects for each disk.
// Returns a function that can be used to update the current object
// and a function to call when processing has finished.
func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(path string), done func()) {
initialPtr := unsafe.Pointer(&initial)
tracker := ¤tPathTracker{
name: &initialPtr,
}
p.currentPaths.Store(disk, tracker)
return func(path string) {
atomic.StorePointer(tracker.name, unsafe.Pointer(&path))
}, func() {
p.currentPaths.Delete(disk)
}
}
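// Typical usage (illustrative; the drive path and object names are placeholders):
//
//	update, done := globalScannerMetrics.currentPathUpdater("/mnt/drive1", "bucket/")
//	update("bucket/object.bin")
//	// ... continue scanning ...
//	done()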
// getCurrentPaths returns the paths currently being processed.
func (p *scannerMetrics) getCurrentPaths() []string {
var res []string
prefix := globalLocalNodeName + "/"
p.currentPaths.Range(func(key, value interface{}) bool {
// We are a bit paranoid, but better miss an entry than crash.
name, ok := key.(string)
if !ok {
return true
}
obj, ok := value.(*currentPathTracker)
if !ok {
return true
}
strptr := (*string)(atomic.LoadPointer(obj.name))
if strptr != nil {
res = append(res, pathJoin(prefix, name, *strptr))
}
return true
})
return res
}
// activeDrives returns the number of currently active disks.
// (since this is concurrent it may not be 100% reliable)
func (p *scannerMetrics) activeDrives() int {
var i int
p.currentPaths.Range(func(k, v interface{}) bool {
i++
return true
})
return i
}
// lifetime returns the lifetime count of the specified metric.
func (p *scannerMetrics) lifetime(m scannerMetric) uint64 {
if m >= scannerMetricLast {
return 0
}
val := atomic.LoadUint64(&p.operations[m])
return val
}
// lastMinute returns the last minute statistics of a metric.
// m should be < scannerMetricLastRealtime
func (p *scannerMetrics) lastMinute(m scannerMetric) AccElem {
if m >= scannerMetricLastRealtime {
return AccElem{}
}
val := p.latency[m].total()
return val
}
// lifetimeActions returns the lifetime count of the specified ilm metric.
func (p *scannerMetrics) lifetimeActions(a lifecycle.Action) uint64 {
if a == lifecycle.NoneAction || a >= lifecycle.ActionCount {
return 0
}
val := atomic.LoadUint64(&p.actions[a])
return val
}
// lastMinuteActions returns the last minute statistics of an ilm metric.
func (p *scannerMetrics) lastMinuteActions(a lifecycle.Action) AccElem {
if a == lifecycle.NoneAction || a >= lifecycle.ActionCount {
return AccElem{}
}
val := p.actionsLatency[a].total()
return val
}
// setCycle updates the current cycle metrics.
func (p *scannerMetrics) setCycle(c *currentScannerCycle) {
if c != nil {
c2 := c.clone()
c = &c2
}
p.cycleInfoMu.Lock()
p.cycleInfo = c
p.cycleInfoMu.Unlock()
}
// getCycle returns the current cycle metrics.
// If not nil, the returned value can safely be modified.
func (p *scannerMetrics) getCycle() *currentScannerCycle {
p.cycleInfoMu.Lock()
defer p.cycleInfoMu.Unlock()
if p.cycleInfo == nil {
return nil
}
c := p.cycleInfo.clone()
return &c
}
func (p *scannerMetrics) report() madmin.ScannerMetrics {
var m madmin.ScannerMetrics
cycle := p.getCycle()
if cycle != nil {
m.CurrentCycle = cycle.current
m.CyclesCompletedAt = cycle.cycleCompleted
m.CurrentStarted = cycle.started
}
m.CollectedAt = time.Now()
m.ActivePaths = p.getCurrentPaths()
m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
for i := scannerMetric(0); i < scannerMetricLast; i++ {
if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
m.LifeTimeOps[i.String()] = n
}
}
if len(m.LifeTimeOps) == 0 {
m.LifeTimeOps = nil
}
m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
lm := p.lastMinute(i)
if lm.N > 0 {
m.LastMinute.Actions[i.String()] = lm.asTimedAction()
}
}
if len(m.LastMinute.Actions) == 0 {
m.LastMinute.Actions = nil
}
// ILM
m.LifeTimeILM = make(map[string]uint64)
for i := lifecycle.NoneAction + 1; i < lifecycle.ActionCount; i++ {
if n := atomic.LoadUint64(&p.actions[i]); n > 0 {
m.LifeTimeILM[i.String()] = n
}
}
if len(m.LifeTimeILM) == 0 {
m.LifeTimeILM = nil
}
if len(m.LifeTimeILM) > 0 {
m.LastMinute.ILM = make(map[string]madmin.TimedAction, len(m.LifeTimeILM))
for i := lifecycle.NoneAction + 1; i < lifecycle.ActionCount; i++ {
lm := p.lastMinuteActions(i)
if lm.N > 0 {
m.LastMinute.ILM[i.String()] = madmin.TimedAction{Count: uint64(lm.N), AccTime: uint64(lm.Total)}
}
}
if len(m.LastMinute.ILM) == 0 {
m.LastMinute.ILM = nil
}
}
return m
}
# Bucket Versioning Design Guide
## Description of `xl.meta`
`xl.meta` is a new self-describing backend format used by MinIO to support AWS S3 compatible versioning.
This file is the source of truth for each `version` at rest. `xl.meta` is a msgpack file serialized from a
well defined data structure. To understand `xl.meta`, here are a few things to start with.
`xl.meta` carries in its first 8 bytes an XL header which describes the current format and the format version,
allowing unmarshallers to automatically use the right data structures to parse the subsequent content in the stream.
### v1.0
| Entry | Encoding | Content
| ----------|-------------|----------------------------------------
| xlHeader | [4]byte | `'X', 'L', '2', ' '`
| xlVersion | [4]byte | `'1', ' ', ' ', ' '`
| xlMetaV2 | msgp object | All versions as single messagepack object
| [EOF] | |
### v1.1+
Version 1.1 added inline data, which will be placed after the metadata.
Therefore, the metadata is wrapped as a binary array for easy skipping.
| Entry | Encoding | Content
| ---------------|----------------|----------------------------------------
| xlHeader | [4]byte | `'X', 'L', '2', ' '`
| xlVersionMajor | uint16 | Major xl-meta version.
| xlVersionMinor | uint16 | Minor xl-meta version.
| xlMetaV2 | msgp bin array | Bin array with serialized metadata
| crc | msgp uint | Lower 32 bits of 64 bit xxhash of previous array contents (v1.2+ only)
| inline data | binary | Inline data if any, see Inline Data section for encoding.
| [EOF] | |
## v1.0-v1.2 Versions
`xl.meta` carries three types of object entries which designate the type of version object stored.
- ObjectType (default)
- LegacyObjectType (preserves existing deployments and older xl.json format)
- DeleteMarker (a versionId to capture the DELETE sequences implemented primarily for AWS spec compatibility)
A sample msgpack-JSON `xl.meta` is shown below; you can debug the content inside `xl.meta` using the [xl-meta.go](https://github.com/minio/minio/tree/master/docs/debugging#decoding-metadata) program (an illustrative invocation follows the sample).
```json
{
"Versions": [
{
"Type": 1,
"V2Obj": {
"ID": "KWUs8S+8RZq4Vp5TWy6KFg==",
"DDir": "X3pDAFu8Rjyft7QD6t7W5g==",
"EcAlgo": 1,
"EcM": 2,
"EcN": 2,
"EcBSize": 10485760,
"EcIndex": 3,
"EcDist": [3, 4, 1, 2],
"CSumAlgo": 1,
"PartNums": [1],
"PartETags": [""],
"PartSizes": [314],
"PartASizes": [282],
"Size": 314,
"MTime": 1591820730,
"MetaSys": {
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "<KEY>",
"X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "<KEY>Fu<KEY>",
"X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "REFSRXYyLUhNQUMtU0hBMjU2",
"X-Minio-Internal-Server-Side-Encryption-Iv": "bW5YRDhRUGczMVhkc2pJT1V1UVlnbWJBcndIQVhpTUN1dn<KEY>az0=",
"X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "<KEY>
},
"MetaUsr": {
"content-type": "application/octet-stream",
"etag": "20000f00f58c508b40720270929bd90e9f07b9bd78fb605e5432a67635fc34722e4fc53b1d5fab9ff8400eb9ded4fba2"
}
}
}
]
}
```
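For illustration, assuming a checkout of the MinIO source tree, the tool can be built and pointed at an `xl.meta` file roughly as follows (the paths are placeholders):
```
go build -o xl-meta ./docs/debugging/xl-meta
./xl-meta /path/to/drive/bucket/object/xl.meta
```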
### v1.3+ versions
Version 1.3 introduces changes to help with [faster metadata reads and updates](https://blog.min.io/minio-versioning-metadata-deep-dive/)
| Entry | Encoding | Content
| ----------------|-----------------------------|----------------------------------------
| xlHeaderVersion | msgp uint | header version identifier
| xlMetaVersion | msgp uint | metadata version identifier
| versions | msgp int | Number of versions following
| header_1 | msgp bin array | Header of version 1
| metadata_1 | msgp bin array | Metadata of version 1
| ...header_n | msgp bin array | Header of last version
| ...metadata_n | msgp bin array | Metadata of last version
Each header contains a msgp array (tuple) encoded object:
xlHeaderVersion version == 1:
```
//msgp:tuple xlMetaV2VersionHeader
type xlMetaV2VersionHeader struct {
VersionID [16]byte // Version UUID, raw.
ModTime int64 // Unix nanoseconds.
Signature [4]byte // Signature of metadata.
 Type uint8 // Type of the version
Flags uint8
}
```
The following flags are defined:
```
const (
FreeVersion = 1 << 0
UsesDataDir = 1 << 1
InlineData = 1 << 2
)
```
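As a reading aid (not part of the on-disk format specification), a decoder that has unmarshalled a header value `hdr` of type `xlMetaV2VersionHeader` could interpret the flags roughly as follows:
```
usesDataDir := hdr.Flags&UsesDataDir != 0 // version keeps its data in a separate data dir
inlineData := hdr.Flags&InlineData != 0   // version data is stored inline in xl.meta
freeVersion := hdr.Flags&FreeVersion != 0 // "free" version retained for delayed cleanup
```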
The "Metadata" section contains a single version, encoded in similar fashion as each version in the `Versions` array
of the previous version.
## Inline Data
Inline data is optional. If no inline data is present, it is encoded as 0 bytes.
| Entry | Encoding | Content
| --------------------|-----------------------------|----------------------------------------
| xlMetaInlineDataVer | byte | version identifier
| id -> data | msgp `map[string][]byte` | Map of string id -> byte content
Currently only xlMetaInlineDataVer == 1 exists.
The ID is the string-encoded Version ID to which the data corresponds.
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
"time"
"github.com/minio/dperf/pkg/dperf"
"github.com/minio/madmin-go/v3"
)
const speedTest = "speedtest"
type speedTestOpts struct {
objectSize int
concurrencyStart int
concurrency int
duration time.Duration
autotune bool
storageClass string
bucketName string
}
// Get the max throughput and iops numbers.
func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
ch := make(chan madmin.SpeedTestResult, 1)
go func() {
defer close(ch)
concurrency := opts.concurrencyStart
if opts.autotune {
			// if we have fewer drives than the requested concurrency then
			// start with concurrency equal to the number of drives, since
			// the default of '32' might be too big and may not complete in
			// the total time of 10s.
if globalEndpoints.NEndpoints() < concurrency {
concurrency = globalEndpoints.NEndpoints()
}
			// If any pool has fewer local disks than the chosen concurrency,
			// cap the "start" concurrency to the lowest number of local
			// disks per server.
for _, localDiskCount := range globalEndpoints.NLocalDisksPathsPerPool() {
if localDiskCount < concurrency {
concurrency = localDiskCount
}
}
// Any concurrency less than '4' just stick to '4' concurrent
// operations for now to begin with.
if concurrency < 4 {
concurrency = 4
}
// if GOMAXPROCS is set to a lower value then choose to use
// concurrency == GOMAXPROCS instead.
if runtime.GOMAXPROCS(0) < concurrency {
concurrency = runtime.GOMAXPROCS(0)
}
}
throughputHighestGet := uint64(0)
throughputHighestPut := uint64(0)
var throughputHighestResults []SpeedTestResult
sendResult := func() {
var result madmin.SpeedTestResult
durationSecs := opts.duration.Seconds()
result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(opts.objectSize) / uint64(durationSecs)
result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(opts.objectSize) / uint64(durationSecs)
var totalUploadTimes madmin.TimeDurations
var totalDownloadTimes madmin.TimeDurations
var totalDownloadTTFB madmin.TimeDurations
for i := 0; i < len(throughputHighestResults); i++ {
errStr := ""
if throughputHighestResults[i].Error != "" {
errStr = throughputHighestResults[i].Error
}
// if the default concurrency yields zero results, throw an error.
if throughputHighestResults[i].Downloads == 0 && opts.concurrencyStart == concurrency {
errStr = fmt.Sprintf("no results for downloads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration)
}
// if the default concurrency yields zero results, throw an error.
if throughputHighestResults[i].Uploads == 0 && opts.concurrencyStart == concurrency {
errStr = fmt.Sprintf("no results for uploads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration)
}
result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
Endpoint: throughputHighestResults[i].Endpoint,
ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
ObjectsPerSec: throughputHighestResults[i].Uploads / uint64(opts.objectSize) / uint64(durationSecs),
Err: errStr,
})
result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
Endpoint: throughputHighestResults[i].Endpoint,
ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
ObjectsPerSec: throughputHighestResults[i].Downloads / uint64(opts.objectSize) / uint64(durationSecs),
Err: errStr,
})
totalUploadTimes = append(totalUploadTimes, throughputHighestResults[i].UploadTimes...)
totalDownloadTimes = append(totalDownloadTimes, throughputHighestResults[i].DownloadTimes...)
totalDownloadTTFB = append(totalDownloadTTFB, throughputHighestResults[i].DownloadTTFB...)
}
result.PUTStats.Response = totalUploadTimes.Measure()
result.GETStats.Response = totalDownloadTimes.Measure()
result.GETStats.TTFB = totalDownloadTTFB.Measure()
result.Size = opts.objectSize
result.Disks = globalEndpoints.NEndpoints()
result.Servers = len(globalNotificationSys.peerClients) + 1
result.Version = Version
result.Concurrent = concurrency
select {
case ch <- result:
case <-ctx.Done():
return
}
}
for {
select {
case <-ctx.Done():
// If the client got disconnected stop the speedtest.
return
default:
}
sopts := speedTestOpts{
objectSize: opts.objectSize,
concurrency: concurrency,
duration: opts.duration,
storageClass: opts.storageClass,
bucketName: opts.bucketName,
}
results := globalNotificationSys.SpeedTest(ctx, sopts)
sort.Slice(results, func(i, j int) bool {
return results[i].Endpoint < results[j].Endpoint
})
totalPut := uint64(0)
totalGet := uint64(0)
for _, result := range results {
totalPut += result.Uploads
totalGet += result.Downloads
}
if totalGet < throughputHighestGet {
// Following check is for situations
// when Writes() scale higher than Reads()
// - practically speaking this never happens
// and should never happen - however it has
// been seen recently due to hardware issues
// causes Reads() to go slower than Writes().
//
// Send such results anyways as this shall
// expose a problem underneath.
if totalPut > throughputHighestPut {
throughputHighestResults = results
throughputHighestPut = totalPut
// let the client see lower value as well
throughputHighestGet = totalGet
}
sendResult()
break
}
			// We break if we did not see a 2.5% growth rate in total GET
			// requests; we have reached our peak at this point.
doBreak := float64(totalGet-throughputHighestGet)/float64(totalGet) < 0.025
throughputHighestGet = totalGet
throughputHighestResults = results
throughputHighestPut = totalPut
if doBreak {
sendResult()
break
}
for _, result := range results {
if result.Error != "" {
// Break out on errors.
sendResult()
return
}
}
sendResult()
if !opts.autotune {
break
}
// Try with a higher concurrency to see if we get better throughput
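			// (illustrative progression with autotune: 4 -> 6 -> 9 -> 14 -> 21 -> ...)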
concurrency += (concurrency + 1) / 2
}
}()
return ch
}
func driveSpeedTest(ctx context.Context, opts madmin.DriveSpeedTestOpts) madmin.DriveSpeedTestResult {
perf := &dperf.DrivePerf{
Serial: opts.Serial,
BlockSize: opts.BlockSize,
FileSize: opts.FileSize,
}
localPaths := globalEndpoints.LocalDisksPaths()
paths := func() (tmpPaths []string) {
for _, lp := range localPaths {
tmpPaths = append(tmpPaths, pathJoin(lp, minioMetaTmpBucket))
}
return tmpPaths
}()
scheme := "http"
if globalIsTLS {
scheme = "https"
}
u := &url.URL{
Scheme: scheme,
Host: globalLocalNodeName,
}
perfs, err := perf.Run(ctx, paths...)
return madmin.DriveSpeedTestResult{
Endpoint: u.String(),
Version: Version,
DrivePerf: func() (results []madmin.DrivePerf) {
for idx, r := range perfs {
result := madmin.DrivePerf{
Path: localPaths[idx],
ReadThroughput: r.ReadThroughput,
WriteThroughput: r.WriteThroughput,
Error: func() string {
if r.Error != nil {
return r.Error.Error()
}
return ""
}(),
}
results = append(results, result)
}
return results
}(),
Error: func() string {
if err != nil {
return err.Error()
}
return ""
}(),
}
}
# MinIO FTP/SFTP Server
MinIO natively supports the FTP/SFTP protocols, which allows any ftp/sftp client to upload and download files.
Currently supported `FTP/SFTP` operations are as follows:
| ftp-client commands | supported |
|:-------------------:|:----------|
| get | yes |
| put | yes |
| ls | yes |
| mkdir | yes |
| rmdir | yes |
| delete | yes |
| append | no |
| rename | no |
MinIO supports the following FTP/SFTP based protocols to access and manage data.
- Secure File Transfer Protocol (SFTP) - Defined by the Internet Engineering Task Force (IETF) as an
extended version of SSH 2.0, allowing file transfer over SSH and for use with Transport Layer
Security (TLS) and VPN applications.
- File Transfer Protocol over SSL/TLS (FTPS) - Encrypted FTP communication via TLS certificates.
- File Transfer Protocol (FTP) - Defined originally by RFC 114 and later replaced by RFC 765 and RFC 959;
unencrypted FTP communication (not recommended).
## Scope
- All IAM credentials are allowed access except rotating credentials; rotating credentials
are not allowed to log in via the FTP/SFTP ports, so you must use the S3 API port if you are
using rotating credentials.
- Access to bucket(s) and object(s) are governed via IAM policies associated with the incoming
login credentials.
- Allows authentication and access for all
- Built-in IDP users and their respective service accounts
- LDAP/AD users and their respective service accounts
- OpenID/OIDC service accounts
- On versioned buckets, FTP/SFTP only operates on the latest objects; if you need to retrieve
an older version you must use an `S3 API client` such as [`mc`](https://github.com/minio/mc).
- All features currently used by your buckets will work as is without any changes
- SSE (Server Side Encryption)
- Replication (Server Side Replication)
## Prerequisites
- It is assumed you have users created and configured with the relevant access policies. To start with,
use the basic "readwrite" canned policy to test all the operations before you finalize on what level
of restrictions are needed for a user (see the example after this list).
- No "admin:*" operations are needed for FTP/SFTP access to the bucket(s) and object(s), so you may
skip them for restrictions.
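For example, a user could be created and granted the canned `readwrite` policy with `mc` along these lines (the alias `myminio`, the username `ftpuser`, and the password are placeholders; the exact `mc admin policy` subcommand may vary by `mc` release):

```
mc admin user add myminio ftpuser ftpuser12345
mc admin policy attach myminio readwrite --user ftpuser
```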
## Usage
Start MinIO in a distributed setup, with 'ftp/sftp' enabled.
```
minio server http://server{1...4}/disk{1...4} \
--ftp="address=:8021" --ftp="passive-port-range=30000-40000" \
--sftp="address=:8022" --sftp="ssh-private-key=/home/miniouser/.ssh/id_rsa"
...
...
```
The following example shows connecting via an ftp client using `minioadmin` credentials and listing a bucket named `runner`:
```
ftp localhost -P 8021
Connected to localhost.
220 Welcome to MinIO FTP Server
Name (localhost:user): minioadmin
331 User name ok, password required
Password:
230 Password ok, continue
Remote system type is UNIX.
Using binary mode to transfer files.
ftp> ls runner/
229 Entering Extended Passive Mode (|||39155|)
150 Opening ASCII mode data connection for file list
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 chunkdocs/
drwxrwxrwx 1 nobody nobody 0 Jan 1 00:00 testdir/
...
```
The following example shows how to list an object and download it locally via an `ftp` client:
```
ftp> ls runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||44269|)
150 Opening ASCII mode data connection for file list
-rwxrwxrwx 1 nobody nobody 45 Apr 1 06:13 chunkdocs/metadata
226 Closing data connection, sent 75 bytes
ftp> get
(remote-file) runner/chunkdocs/metadata
(local-file) test
local: test remote: runner/chunkdocs/metadata
229 Entering Extended Passive Mode (|||37785|)
150 Data transfer starting 45 bytes
45 3.58 KiB/s
226 Closing data connection, sent 45 bytes
45 bytes received in 00:00 (3.55 KiB/s)
...
```
The following example shows connecting via an sftp client using `minioadmin` credentials and listing a bucket named `runner`:
```
sftp -P 8022 minioadmin@localhost
minioadmin@localhost's password:
Connected to localhost.
sftp> ls runner/
chunkdocs testdir
```
The following example shows how to download an object locally via an `sftp` client:
```
sftp> get runner/chunkdocs/metadata metadata
Fetching /runner/chunkdocs/metadata to metadata
metadata 100% 226 16.6KB/s 00:00
sftp>
```
## Advanced options
### Change default FTP port
Default port '8021' can be changed via
```
--ftp="address=:3021"
```
### Change FTP passive port range
By default FTP asks the OS to assign a free port automatically; however, you may want to restrict
this to specific ports in certain restricted environments via
```
--ftp="passive-port-range=30000-40000"
```
### Change default SFTP port
Default port '8022' can be changed via
```
--sftp="address=:3022"
```
### TLS (FTP)
Unlike SFTP server, FTP server is insecure by default. To operate under TLS mode, you need to provide certificates via
```
--ftp="tls-private-key=path/to/private.key" --ftp="tls-public-cert=path/to/public.crt"
```
> NOTE: if the MinIO distributed setup is already configured to run under TLS, FTP will automatically use the relevant
> certs from the server certificate chain; this is mainly to keep the setup simple. However, if you wish to terminate
> TLS with certificates for a different domain for your FTP servers, you may use the above command-line options.
// Copyright (c) 2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
"github.com/minio/madmin-go/v3"
)
type lastDayTierStats struct {
Bins [24]tierStats
UpdatedAt time.Time
}
func (l *lastDayTierStats) addStats(ts tierStats) {
now := time.Now()
l.forwardTo(now)
nowIdx := now.Hour()
l.Bins[nowIdx] = l.Bins[nowIdx].add(ts)
}
// forwardTo moves time to t, clearing entries between last update and t.
func (l *lastDayTierStats) forwardTo(t time.Time) {
if t.IsZero() {
t = time.Now()
}
since := t.Sub(l.UpdatedAt).Hours()
// within the hour since l.UpdatedAt
if since < 1 {
return
}
idx, lastIdx := t.Hour(), l.UpdatedAt.Hour()
l.UpdatedAt = t // update to the latest time index
if since >= 24 {
l.Bins = [24]tierStats{}
return
}
for lastIdx != idx {
lastIdx = (lastIdx + 1) % 24
l.Bins[lastIdx] = tierStats{}
}
}
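// For illustration: if UpdatedAt was 3 hours ago, the three hourly bins between the
// last update hour (exclusive) and the current hour (inclusive) are zeroed; after 24
// hours or more, all bins are cleared.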
func (l *lastDayTierStats) clone() lastDayTierStats {
clone := lastDayTierStats{
UpdatedAt: l.UpdatedAt,
}
copy(clone.Bins[:], l.Bins[:])
return clone
}
func (l lastDayTierStats) merge(m lastDayTierStats) (merged lastDayTierStats) {
cl := l.clone()
cm := m.clone()
if cl.UpdatedAt.After(cm.UpdatedAt) {
cm.forwardTo(cl.UpdatedAt)
merged.UpdatedAt = cl.UpdatedAt
} else {
cl.forwardTo(cm.UpdatedAt)
merged.UpdatedAt = cm.UpdatedAt
}
for i := range cl.Bins {
merged.Bins[i] = cl.Bins[i].add(cm.Bins[i])
}
return merged
}
// DailyAllTierStats is used to aggregate last day tier stats across MinIO servers
type DailyAllTierStats map[string]lastDayTierStats
func (l DailyAllTierStats) merge(m DailyAllTierStats) {
for tier, st := range m {
l[tier] = l[tier].merge(st)
}
}
func (l DailyAllTierStats) addToTierInfo(tierInfos []madmin.TierInfo) []madmin.TierInfo {
for i := range tierInfos {
var lst lastDayTierStats
var ok bool
if lst, ok = l[tierInfos[i].Name]; !ok {
continue
}
for hr, st := range lst.Bins {
tierInfos[i].DailyStats.Bins[hr] = madmin.TierStats{
TotalSize: st.TotalSize,
NumVersions: st.NumVersions,
NumObjects: st.NumObjects,
}
}
tierInfos[i].DailyStats.UpdatedAt = lst.UpdatedAt
}
return tierInfos
}
#!/bin/bash
set -e
set -x
export CI=1
make || exit -1
killall -9 minio || true
rm -rf /tmp/xl/
mkdir -p /tmp/xl/1/ /tmp/xl/2/
export MINIO_KMS_SECRET_KEY="my-minio-key:<KEY>"
NODES=4
args1=()
args2=()
for i in $(seq 1 $NODES); do
args1+=("http://localhost:$((9000 + i))/tmp/xl/1/$i ")
args2+=("http://localhost:$((9100 + i))/tmp/xl/2/$i ")
done
for i in $(seq 1 $NODES); do
./minio server --address "127.0.0.1:$((9000 + i))" ${args1[@]} & # | tee /tmp/minio/node.$i &
./minio server --address "127.0.0.1:$((9100 + i))" ${args2[@]} & # | tee /tmp/minio/node.$i &
done
sleep 10
./mc alias set myminio1 http://localhost:9001 minioadmin minioadmin
./mc alias set myminio2 http://localhost:9101 minioadmin minioadmin
sleep 1
./mc mb myminio1/testbucket/ --with-lock
./mc mb myminio2/testbucket/ --with-lock
./mc encrypt set sse-s3 my-minio-key myminio1/testbucket/
./mc encrypt set sse-s3 my-minio-key myminio2/testbucket/
./mc replicate add myminio1/testbucket --remote-bucket http://minioadmin:minioadmin@localhost:9101/testbucket --priority 1
./mc replicate add myminio2/testbucket --remote-bucket http://minioadmin:minioadmin@localhost:9001/testbucket --priority 1
sleep 1
cp README.md internal.tar
./mc cp internal.tar myminio1/testbucket/dir/1.tar
./mc cp internal.tar myminio2/testbucket/dir/2.tar
sleep 1
./mc ls -r --versions myminio1/testbucket/dir/ >/tmp/dir_1.txt
./mc ls -r --versions myminio2/testbucket/dir/ >/tmp/dir_2.txt
ret=0
out=$(diff -qpruN /tmp/dir_1.txt /tmp/dir_2.txt) || ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: expected no 'diff' after replication: $out"
exit 1
fi
<file_sep>//go:build windows
// +build windows
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"path/filepath"
"syscall"
)
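// access returns nil if name exists; it uses Lstat, so symlinks themselves are
// checked rather than their targets.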
func access(name string) error {
_, err := os.Lstat(name)
return err
}
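// osMkdirAll creates dirPath along with any missing parent directories.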
func osMkdirAll(dirPath string, perm os.FileMode) error {
return os.MkdirAll(dirPath, perm)
}
// readDirFn applies the fn() function to each entry at dirPath, without recursing
// into subdirectories. If dirPath doesn't exist, this function doesn't return
// an error.
func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
// Ensure we don't pick up files as directories.
globAll := filepath.Clean(dirPath) + `\*`
globAllP, err := syscall.UTF16PtrFromString(globAll)
if err != nil {
return errInvalidArgument
}
data := &syscall.Win32finddata{}
handle, err := syscall.FindFirstFile(globAllP, data)
if err != nil {
if err = syscallErrToFileErr(dirPath, err); err == errFileNotFound {
return nil
}
return err
}
defer syscall.FindClose(handle)
for ; ; err = syscall.FindNextFile(handle, data) {
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
break
} else {
if isSysErrPathNotFound(err) {
return nil
}
err = osErrToFileErr(&os.PathError{
Op: "FindNextFile",
Path: dirPath,
Err: err,
})
if err == errFileNotFound {
return nil
}
return err
}
}
name := syscall.UTF16ToString(data.FileName[0:])
if name == "" || name == "." || name == ".." { // Useless names
continue
}
var typ os.FileMode = 0 // regular file
switch {
case data.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0:
// Reparse point is a symlink
fi, err := os.Stat(pathJoin(dirPath, string(name)))
if err != nil {
// It got deleted in the meantime, not found
// or returns too many symlinks ignore this
// file/directory.
if osIsNotExist(err) || isSysErrPathNotFound(err) ||
isSysErrTooManySymlinks(err) {
continue
}
return err
}
if fi.IsDir() {
// Ignore symlinked directories.
continue
}
typ = fi.Mode()
case data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0:
typ = os.ModeDir
}
if err = filter(name, typ); err == errDoneForNow {
// filtering requested to return by caller.
return nil
}
}
return nil
}
// Return N entries at the directory dirPath.
func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err error) {
// Ensure we don't pick up files as directories.
globAll := filepath.Clean(dirPath) + `\*`
globAllP, err := syscall.UTF16PtrFromString(globAll)
if err != nil {
return nil, errInvalidArgument
}
data := &syscall.Win32finddata{}
handle, err := syscall.FindFirstFile(globAllP, data)
if err != nil {
return nil, syscallErrToFileErr(dirPath, err)
}
defer syscall.FindClose(handle)
count := opts.count
for ; count != 0; err = syscall.FindNextFile(handle, data) {
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
break
} else {
return nil, osErrToFileErr(&os.PathError{
Op: "FindNextFile",
Path: dirPath,
Err: err,
})
}
}
name := syscall.UTF16ToString(data.FileName[0:])
if name == "" || name == "." || name == ".." { // Useless names
continue
}
switch {
case data.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0:
// Reparse point is a symlink
fi, err := os.Stat(pathJoin(dirPath, string(name)))
if err != nil {
// It got deleted in the meantime, not found
// or returns too many symlinks ignore this
// file/directory.
if osIsNotExist(err) || isSysErrPathNotFound(err) ||
isSysErrTooManySymlinks(err) {
continue
}
return nil, err
}
if !opts.followDirSymlink && fi.IsDir() {
// directory symlinks are ignored.
continue
}
case data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0:
name = name + SlashSeparator
}
count--
entries = append(entries, name)
}
return entries, nil
}
func globalSync() {
// no-op on windows
}
func syscallErrToFileErr(dirPath string, err error) error {
switch err {
case nil:
return nil
case syscall.ERROR_FILE_NOT_FOUND:
return errFileNotFound
case syscall.ERROR_ACCESS_DENIED:
return errFileAccessDenied
default:
// Fails on file not found and when not a directory.
return osErrToFileErr(&os.PathError{
Op: "FindNextFile",
Path: dirPath,
Err: err,
})
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"fmt"
)
// ErrUnknownRegion - unknown region error.
type ErrUnknownRegion struct {
Region string
}
func (err ErrUnknownRegion) Error() string {
return fmt.Sprintf("unknown region '%v'", err.Region)
}
// ErrARNNotFound - ARN not found error.
type ErrARNNotFound struct {
ARN ARN
}
func (err ErrARNNotFound) Error() string {
return fmt.Sprintf("ARN '%v' not found", err.ARN)
}
// ErrInvalidARN - invalid ARN error.
type ErrInvalidARN struct {
ARN string
}
func (err ErrInvalidARN) Error() string {
return fmt.Sprintf("invalid ARN '%v'", err.ARN)
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"sync"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/sync/errgroup"
)
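// getOnlineDisks returns the disks that are currently reachable and not
// healing, probing them in parallel in a randomized order.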
func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
disks := er.getDisks()
var wg sync.WaitGroup
var mu sync.Mutex
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
i := i
wg.Add(1)
go func() {
defer wg.Done()
if disks[i-1] == nil {
return
}
di, err := disks[i-1].DiskInfo(context.Background())
if err != nil || di.Healing {
// - Do not consume disks which are not reachable
// unformatted or simply not accessible for some reason.
//
// - Do not consume disks which are being healed
//
// - Future: skip busy disks
return
}
mu.Lock()
newDisks = append(newDisks, disks[i-1])
mu.Unlock()
}()
}
wg.Wait()
return newDisks
}
func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
disks := er.getDisks()
// Based on the random shuffling return back randomized disks.
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
if disks[i-1] != nil && disks[i-1].IsLocal() {
newDisks = append(newDisks, disks[i-1])
}
}
return newDisks
}
// getLoadBalancedDisks - fetches a load balanced (sufficiently randomized) disk slice.
// In optimized mode it skips disks that are unreachable or currently healing.
func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
disks := er.getDisks()
if !optimized {
var newDisks []StorageAPI
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
newDisks = append(newDisks, disks[i-1])
}
return newDisks
}
var wg sync.WaitGroup
var mu sync.Mutex
newDisks := map[uint64][]StorageAPI{}
// Based on the random shuffling return back randomized disks.
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
i := i
wg.Add(1)
go func() {
defer wg.Done()
if disks[i-1] == nil {
return
}
di, err := disks[i-1].DiskInfo(context.Background())
if err != nil || di.Healing {
// - Do not consume disks which are not reachable
// unformatted or simply not accessible for some reason.
//
// - Do not consume disks which are being healed
//
// - Future: skip busy disks
return
}
mu.Lock()
// Group disks by usage, at a resolution of MiB
newDisks[di.Used/1024/1024] = append(newDisks[di.Used/1024/1024], disks[i-1])
mu.Unlock()
}()
}
wg.Wait()
var max uint64
for k := range newDisks {
if k > max {
max = k
}
}
// Return disks which have maximum disk usage common.
return newDisks[max]
}
// readMultipleFiles Reads raw data from all specified files from all disks.
func readMultipleFiles(ctx context.Context, disks []StorageAPI, req ReadMultipleReq, readQuorum int) ([]ReadMultipleResp, error) {
resps := make([]chan ReadMultipleResp, len(disks))
for i := range resps {
resps[i] = make(chan ReadMultipleResp, len(req.Files))
}
g := errgroup.WithNErrs(len(disks))
// Read files in parallel across disks.
for index := range disks {
index := index
g.Go(func() (err error) {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].ReadMultiple(ctx, req, resps[index])
}, index)
}
dataArray := make([]ReadMultipleResp, 0, len(req.Files))
// Merge results. They should come in order from each.
for _, wantFile := range req.Files {
quorum := 0
toAdd := ReadMultipleResp{
Bucket: req.Bucket,
Prefix: req.Prefix,
File: wantFile,
}
for i := range resps {
if disks[i] == nil {
continue
}
select {
case <-ctx.Done():
case gotFile, ok := <-resps[i]:
if !ok {
continue
}
if gotFile.Error != "" || !gotFile.Exists {
continue
}
if gotFile.File != wantFile || gotFile.Bucket != req.Bucket || gotFile.Prefix != req.Prefix {
continue
}
quorum++
if toAdd.Modtime.After(gotFile.Modtime) || len(gotFile.Data) < len(toAdd.Data) {
// Pick latest, or largest to avoid possible truncated entries.
continue
}
toAdd = gotFile
}
}
if quorum < readQuorum {
toAdd.Exists = false
toAdd.Error = errErasureReadQuorum.Error()
toAdd.Data = nil
}
dataArray = append(dataArray, toAdd)
}
errs := g.Wait()
for index, err := range errs {
if err == nil {
continue
}
if !IsErr(err, []error{
errFileNotFound,
errVolumeNotFound,
errFileVersionNotFound,
errDiskNotFound,
errUnformattedDisk,
}...) {
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
disks[index], req.Bucket, req.Prefix, err),
disks[index].String())
}
}
// Return all the metadata.
return dataArray, nil
}
<file_sep>module github.com/minio/minio
go 1.19
require (
cloud.google.com/go/storage v1.30.1
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.38.1
github.com/alecthomas/participle v0.7.1
github.com/bcicen/jstream v1.0.1
github.com/beevik/ntp v1.1.1
github.com/buger/jsonparser v1.1.1
github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb v1.0.29
github.com/coredns/coredns v1.10.1
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/coreos/go-systemd/v22 v22.5.0
github.com/cosnicolaou/pbzip2 v1.0.2
github.com/dchest/siphash v1.2.3
github.com/djherbis/atime v1.1.0
github.com/dustin/go-humanize v1.0.1
github.com/eclipse/paho.mqtt.golang v1.4.2
github.com/elastic/go-elasticsearch/v7 v7.17.10
github.com/fatih/color v1.15.0
github.com/felixge/fgprof v0.9.3
github.com/fraugster/parquet-go v0.12.0
github.com/go-ldap/ldap/v3 v3.4.5
github.com/go-openapi/loads v0.21.2
github.com/go-sql-driver/mysql v1.7.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/gomodule/redigo v1.8.9
github.com/google/uuid v1.3.0
github.com/hashicorp/golang-lru v0.5.4
github.com/inconshreveable/mousetrap v1.1.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.16.6
github.com/klauspost/cpuid/v2 v2.2.5
github.com/klauspost/filepathx v1.1.1
github.com/klauspost/pgzip v1.2.6
github.com/klauspost/readahead v1.4.0
github.com/klauspost/reedsolomon v1.11.7
github.com/lib/pq v1.10.9
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/miekg/dns v1.1.55
github.com/minio/cli v1.24.2
github.com/minio/console v0.30.1-0.20230623034122-b7b0271ec78c
github.com/minio/csvparser v1.0.0
github.com/minio/dperf v0.4.10
github.com/minio/highwayhash v1.0.2
github.com/minio/kes-go v0.1.0
github.com/minio/madmin-go/v2 v2.2.1
github.com/minio/madmin-go/v3 v3.0.4
github.com/minio/minio-go/v7 v7.0.58
github.com/minio/mux v1.9.0
github.com/minio/pkg v1.7.5
github.com/minio/selfupdate v0.6.0
github.com/minio/sha256-simd v1.0.1
github.com/minio/simdjson-go v0.4.5
github.com/minio/sio v0.3.1
github.com/minio/xxml v0.0.3
github.com/minio/zipindex v0.3.0
github.com/mitchellh/go-homedir v1.1.0
github.com/nats-io/nats-server/v2 v2.7.4
github.com/nats-io/nats.go v1.27.1
github.com/nats-io/stan.go v0.10.4
github.com/ncw/directio v1.0.5
github.com/nsqio/go-nsq v1.1.0
github.com/philhofer/fwd v1.1.2
github.com/pierrec/lz4 v2.6.1+incompatible
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.5
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model v0.4.0
github.com/prometheus/common v0.44.0
github.com/prometheus/procfs v0.11.0
github.com/rabbitmq/amqp091-go v1.8.1
github.com/rs/cors v1.9.0
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417
github.com/secure-io/sio-go v0.3.1
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/shirou/gopsutil/v3 v3.23.5
github.com/tidwall/gjson v1.14.4
github.com/tinylib/msgp v1.1.8
github.com/valyala/bytebufferpool v1.0.0
github.com/xdg/scram v1.0.5
github.com/zeebo/xxh3 v1.0.2
go.etcd.io/etcd/api/v3 v3.5.9
go.etcd.io/etcd/client/v3 v3.5.9
go.uber.org/atomic v1.11.0
go.uber.org/zap v1.24.0
goftp.io/server/v2 v2.0.1
golang.org/x/crypto v0.10.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/oauth2 v0.9.0
golang.org/x/sys v0.9.0
golang.org/x/time v0.3.0
google.golang.org/api v0.128.0
gopkg.in/yaml.v2 v2.4.0
)
require (
aead.dev/mem v0.2.0 // indirect
aead.dev/minisign v0.2.0 // indirect
cloud.google.com/go v0.110.3 // indirect
cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.1 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/apache/thrift v0.18.1 // indirect
github.com/armon/go-metrics v0.4.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/charmbracelet/bubbles v0.16.1 // indirect
github.com/charmbracelet/bubbletea v0.24.2 // indirect
github.com/charmbracelet/lipgloss v0.7.1 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/eapache/go-resiliency v1.3.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/frankban/quicktest v1.14.3 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/gdamore/tcell/v2 v2.6.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.11.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-hclog v1.2.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jedib0t/go-pretty/v6 v6.4.6 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/juju/ratelimit v1.0.2 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
github.com/lestrrat-go/blackmagic v1.0.1 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/iter v1.0.2 // indirect
github.com/lestrrat-go/jwx v1.2.26 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/colorjson v1.0.5 // indirect
github.com/minio/filepath v1.0.0 // indirect
github.com/minio/mc v0.0.0-20230620210040-4b06db8e171f // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/websocket v1.6.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.1 // indirect
github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 // indirect
github.com/nats-io/nats-streaming-server v0.24.3 // indirect
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/navidys/tvxwidgets v0.3.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pkg/xattr v0.4.9 // indirect
github.com/posener/complete v1.2.3 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/prometheus/prom2json v1.3.3 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/tview v0.0.0-20230621164836-6cc0565babaf // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.3 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/unrolled/secure v1.13.0 // indirect
github.com/xdg/stringprep v1.0.3 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.mongodb.org/mongo-driver v1.12.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.11.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/term v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/tools v0.10.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.56.1 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/h2non/filetype.v1 v1.0.5 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
<file_sep>module github.com/minio/minio/docs/debugging/reorder-disks
go 1.19
require github.com/minio/pkg v1.6.4
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package heal
import (
"errors"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/env"
)
// Heal environment variables
const (
Bitrot = "bitrotscan"
Sleep = "max_sleep"
IOCount = "max_io"
EnvBitrot = "MINIO_HEAL_BITROTSCAN"
EnvSleep = "MINIO_HEAL_MAX_SLEEP"
EnvIOCount = "MINIO_HEAL_MAX_IO"
)
var configMutex sync.RWMutex
// Config represents the heal settings.
type Config struct {
// Bitrot will perform bitrot scan on local disk when checking objects.
Bitrot string `json:"bitrotscan"`
// maximum sleep duration between objects to slow down heal operation.
Sleep time.Duration `json:"sleep"`
IOCount int `json:"iocount"`
// Cached value from Bitrot field
cache struct {
// -1: bitrot disabled, 0: continuous bitrot scanning, > 0: interval between bitrot cycles
bitrotCycle time.Duration
}
}
// BitrotScanCycle returns the configured cycle for the scanner healing
// -1 for not enabled
//
// 0 for continuous bitrot scanning
//
// >0 interval duration between cycles
func (opts Config) BitrotScanCycle() (d time.Duration) {
configMutex.RLock()
defer configMutex.RUnlock()
return opts.cache.bitrotCycle
}
// Clone safely returns a copy of the heal configuration
func (opts Config) Clone() (int, time.Duration, string) {
configMutex.RLock()
defer configMutex.RUnlock()
return opts.IOCount, opts.Sleep, opts.Bitrot
}
// Update updates opts with nopts
func (opts *Config) Update(nopts Config) {
configMutex.Lock()
defer configMutex.Unlock()
opts.Bitrot = nopts.Bitrot
opts.IOCount = nopts.IOCount
opts.Sleep = nopts.Sleep
opts.cache.bitrotCycle, _ = parseBitrotConfig(nopts.Bitrot)
}
// DefaultKVS - default KV config for heal settings
var DefaultKVS = config.KVS{
config.KV{
Key: Bitrot,
Value: config.EnableOff,
},
config.KV{
Key: Sleep,
Value: "1s",
},
config.KV{
Key: IOCount,
Value: "100",
},
}
const minimumBitrotCycleInMonths = 1
func parseBitrotConfig(s string) (time.Duration, error) {
// Try to parse as a boolean
enabled, err := config.ParseBool(s)
if err == nil {
switch enabled {
case true:
return 0, nil
case false:
return -1, nil
}
}
// Try to parse as a number of months
if !strings.HasSuffix(s, "m") {
return -1, errors.New("unknown format")
}
months, err := strconv.Atoi(strings.TrimSuffix(s, "m"))
if err != nil {
return -1, err
}
if months < minimumBitrotCycleInMonths {
return -1, fmt.Errorf("minimum bitrot cycle is %d month(s)", minimumBitrotCycleInMonths)
}
return time.Duration(months) * 30 * 24 * time.Hour, nil
}
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.HealSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
cfg.Bitrot = env.Get(EnvBitrot, kvs.GetWithDefault(Bitrot, DefaultKVS))
_, err = parseBitrotConfig(cfg.Bitrot)
if err != nil {
return cfg, fmt.Errorf("'heal:bitrotscan' value invalid: %w", err)
}
cfg.Sleep, err = time.ParseDuration(env.Get(EnvSleep, kvs.GetWithDefault(Sleep, DefaultKVS)))
if err != nil {
return cfg, fmt.Errorf("'heal:max_sleep' value invalid: %w", err)
}
cfg.IOCount, err = strconv.Atoi(env.Get(EnvIOCount, kvs.GetWithDefault(IOCount, DefaultKVS)))
if err != nil {
return cfg, fmt.Errorf("'heal:max_io' value invalid: %w", err)
}
return cfg, nil
}
<file_sep>FROM minio/minio:latest
ENV PATH=/opt/bin:$PATH
COPY ./minio /opt/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]
<file_sep># Automatic Site Replication
This feature allows multiple independent MinIO sites (or clusters) that are using the same external Identity Provider (IDP) to be configured as replicas. In this situation the set of replica sites is referred to as peer sites or just sites. When site-replication is enabled on a set of sites, the following changes are replicated to all other sites:
- Creation and deletion of buckets and objects
- Creation and deletion of all IAM users, groups, policies and their mappings to users or groups
- Creation of STS credentials
- Creation and deletion of service accounts (except those owned by the root user)
- Changes to Bucket features such as:
- Bucket Policies
- Bucket Tags
- Bucket Object-Lock configurations (including retention and legal hold configuration)
- Bucket Encryption configuration
> NOTE: Bucket versioning is automatically enabled for all new and existing buckets on all replicated sites.
The following Bucket features will **not be replicated**, as they are designed to differ between sites:
- Bucket notification configuration
- Bucket lifecycle (ILM) configuration
## Pre-requisites
- Initially, only **one** of the sites added for replication may have data. After site-replication is successfully configured, this data is replicated to the other (initially empty) sites. Subsequently, objects may be written to any of the sites, and they will be replicated to all other sites.
- All sites **must** have the same deployment credentials (i.e. `MINIO_ROOT_USER`, `MINIO_ROOT_PASSWORD`).
- **Removing a site** is not allowed from a set of replicated sites once configured.
- All sites must be using the **same** external IDP(s) if any.
- For [SSE-S3 or SSE-KMS encryption via KMS](https://min.io/docs/minio/linux/operations/server-side-encryption.html "MinIO KMS Guide"), all sites **must** have access to a central KMS deployment. This can be achieved via a central KES server or multiple KES servers (say one per site) connected via a central KMS (Vault) server.
## Configuring Site Replication
- Configure an alias in `mc` for each of the sites. For example if you have three MinIO sites, you may run:
```sh
mc alias set minio1 https://minio1.example.com:9000 adminuser adminpassword
mc alias set minio2 https://minio2.example.com:9000 adminuser adminpassword
mc alias set minio3 https://minio3.example.com:9000 adminuser adminpassword
```
or
```sh
export MC_HOST_minio1=https://adminuser:<EMAIL>@minio1.<EMAIL>
export MC_HOST_minio2=https://adminuser:adminpassword@minio2.<EMAIL>
export MC_HOST_minio3=https://adminuser:[email protected]
```
- Add site replication configuration with:
```sh
mc admin replicate add minio1 minio2 minio3
```
- Once the above command returns success, you may query site replication configuration with:
```sh
mc admin replicate info minio1
```
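- To check the current replication status across the sites, you may additionally run, for example:
```sh
mc admin replicate status minio1
```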
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/hex"
"fmt"
"math/rand"
"testing"
"time"
"github.com/google/uuid"
"github.com/minio/minio/internal/dsync"
)
func TestLocalLockerExpire(t *testing.T) {
wResources := make([]string, 1000)
rResources := make([]string, 1000)
l := newLocker()
ctx := context.Background()
for i := range wResources {
arg := dsync.LockArgs{
UID: mustGetUUID(),
Resources: []string{mustGetUUID()},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.Lock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
wResources[i] = arg.Resources[0]
}
for i := range rResources {
name := mustGetUUID()
arg := dsync.LockArgs{
UID: mustGetUUID(),
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
// RLock twice
ok, err = l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rResources[i] = arg.Resources[0]
}
if len(l.lockMap) != len(rResources)+len(wResources) {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
}
if len(l.lockUID) != len(rResources)+len(wResources) {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
}
// Expire an hour from now, should keep all
l.expireOldLocks(time.Hour)
if len(l.lockMap) != len(rResources)+len(wResources) {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources))
}
if len(l.lockUID) != len(rResources)+len(wResources) {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources))
}
// Expire a minute ago.
l.expireOldLocks(-time.Minute)
if len(l.lockMap) != 0 {
t.Fatalf("after cleanup should be empty, got %d", len(l.lockMap))
}
if len(l.lockUID) != 0 {
t.Fatalf("lockUID len, got %d, want %d", len(l.lockUID), 0)
}
}
func TestLocalLockerUnlock(t *testing.T) {
const n = 1000
const m = 5
wResources := make([][m]string, n)
rResources := make([]string, n)
wUIDs := make([]string, n)
rUIDs := make([]string, 0, n*2)
l := newLocker()
ctx := context.Background()
for i := range wResources {
names := [m]string{}
for j := range names {
names[j] = mustGetUUID()
}
uid := mustGetUUID()
arg := dsync.LockArgs{
UID: uid,
Resources: names[:],
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.Lock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
wResources[i] = names
wUIDs[i] = uid
}
for i := range rResources {
name := mustGetUUID()
uid := mustGetUUID()
arg := dsync.LockArgs{
UID: uid,
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rUIDs = append(rUIDs, uid)
// RLock twice, different uid
uid = mustGetUUID()
arg.UID = uid
ok, err = l.RLock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
rResources[i] = name
rUIDs = append(rUIDs, uid)
}
// Each Lock has m entries
if len(l.lockMap) != len(rResources)+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
}
// A UID is added for every resource
if len(l.lockUID) != len(rResources)*2+len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources)*2, len(wResources)*m)
}
// RUnlock once...
for i, name := range rResources {
arg := dsync.LockArgs{
UID: rUIDs[i*2],
Resources: []string{name},
Source: t.Name(),
Owner: "owner",
Quorum: 0,
}
ok, err := l.RUnlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// Each Lock has m entries
if len(l.lockMap) != len(rResources)+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), len(rResources), len(wResources)*m)
}
// A UID is added for every resource.
// We removed len(rResources) read sources.
if len(l.lockUID) != len(rResources)+len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources)*m)
}
// RUnlock again, different uids
for i, name := range rResources {
arg := dsync.LockArgs{
UID: rUIDs[i*2+1],
Resources: []string{name},
Source: "minio",
Owner: "owner",
Quorum: 0,
}
ok, err := l.RUnlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// Each Lock has m entries
if len(l.lockMap) != 0+len(wResources)*m {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, len(wResources)*m)
}
// A UID is added for every resource.
// We removed all the RLocked entries above.
if len(l.lockUID) != len(wResources)*m {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, len(wResources)*m)
}
// Remove write locked
for i, names := range wResources {
arg := dsync.LockArgs{
UID: wUIDs[i],
Resources: names[:],
Source: "minio",
Owner: "owner",
Quorum: 0,
}
ok, err := l.Unlock(ctx, arg)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not get write lock")
}
}
// All should be gone now...
// Each Lock has m entries
if len(l.lockMap) != 0 {
t.Fatalf("lockmap len, got %d, want %d + %d", len(l.lockMap), 0, 0)
}
if len(l.lockUID) != 0 {
t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), 0, 0)
}
}
func Test_localLocker_expireOldLocksExpire(t *testing.T) {
rng := rand.New(rand.NewSource(0))
// Numbers of unique locks
for _, locks := range []int{100, 1000, 1e6} {
if testing.Short() && locks > 100 {
continue
}
t.Run(fmt.Sprintf("%d-locks", locks), func(t *testing.T) {
// Number of readers per lock...
for _, readers := range []int{1, 10, 100} {
if locks > 1000 && readers > 1 {
continue
}
if testing.Short() && readers > 10 {
continue
}
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker()
for i := 0; i < locks; i++ {
var tmp [16]byte
rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ {
rng.Read(tmp[:])
ok, err := l.RLock(context.Background(), dsync.LockArgs{
UID: uuid.NewString(),
Resources: res,
Source: hex.EncodeToString(tmp[:8]),
Owner: hex.EncodeToString(tmp[8:]),
Quorum: 0,
})
if !ok || err != nil {
t.Fatal("failed:", err, ok)
}
}
}
start := time.Now()
l.expireOldLocks(time.Hour)
t.Logf("Scan Took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
if len(l.lockMap) != locks {
t.Fatalf("objects deleted, want %d != got %d", locks, len(l.lockMap))
}
if len(l.lockUID) != locks*readers {
t.Fatalf("objects deleted, want %d != got %d", locks*readers, len(l.lockUID))
}
// Expire 50%
expired := time.Now().Add(-time.Hour * 2)
for _, v := range l.lockMap {
for i := range v {
if rng.Intn(2) == 0 {
v[i].TimeLastRefresh = expired
}
}
}
start = time.Now()
l.expireOldLocks(time.Hour)
t.Logf("Expire 50%% took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
if len(l.lockUID) == locks*readers {
t.Fatalf("objects uids all remain, unlikely")
}
if len(l.lockMap) == 0 {
t.Fatalf("objects all deleted, 0 remains")
}
if len(l.lockUID) == 0 {
t.Fatalf("objects uids all deleted, 0 remains")
}
start = time.Now()
l.expireOldLocks(-time.Minute)
t.Logf("Expire rest took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
if len(l.lockMap) != 0 {
t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockMap))
}
if len(l.lockUID) != 0 {
t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockUID))
}
})
}
})
}
}
func Test_localLocker_RUnlock(t *testing.T) {
rng := rand.New(rand.NewSource(0))
// Numbers of unique locks
for _, locks := range []int{1, 100, 1000, 1e6} {
if testing.Short() && locks > 100 {
continue
}
t.Run(fmt.Sprintf("%d-locks", locks), func(t *testing.T) {
// Number of readers per lock...
for _, readers := range []int{1, 10, 100} {
if locks > 1000 && readers > 1 {
continue
}
if testing.Short() && readers > 10 {
continue
}
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker()
for i := 0; i < locks; i++ {
var tmp [16]byte
rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ {
rng.Read(tmp[:])
ok, err := l.RLock(context.Background(), dsync.LockArgs{
UID: uuid.NewString(),
Resources: res,
Source: hex.EncodeToString(tmp[:8]),
Owner: hex.EncodeToString(tmp[8:]),
Quorum: 0,
})
if !ok || err != nil {
t.Fatal("failed:", err, ok)
}
}
}
// Expire 50%
toUnLock := make([]dsync.LockArgs, 0, locks*readers)
for k, v := range l.lockMap {
for _, lock := range v {
if rng.Intn(2) == 0 {
toUnLock = append(toUnLock, dsync.LockArgs{Resources: []string{k}, UID: lock.UID})
}
}
}
start := time.Now()
for _, lock := range toUnLock {
ok, err := l.ForceUnlock(context.Background(), lock)
if err != nil || !ok {
t.Fatal(err)
}
}
t.Logf("Expire 50%% took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
if len(l.lockUID) == locks*readers {
t.Fatalf("objects uids all remain, unlikely")
}
if len(l.lockMap) == 0 && locks > 10 {
t.Fatalf("objects all deleted, 0 remains")
}
if len(l.lockUID) != locks*readers-len(toUnLock) {
t.Fatalf("want %d objects uids all deleted, %d remains", len(l.lockUID), locks*readers-len(toUnLock))
}
toUnLock = toUnLock[:0]
for k, v := range l.lockMap {
for _, lock := range v {
toUnLock = append(toUnLock, dsync.LockArgs{Resources: []string{k}, UID: lock.UID, Owner: lock.Owner})
}
}
start = time.Now()
for _, lock := range toUnLock {
ok, err := l.RUnlock(context.TODO(), lock)
if err != nil || !ok {
t.Fatal(err)
}
}
t.Logf("Expire rest took: %v. Left: %d/%d", time.Since(start).Round(time.Millisecond), len(l.lockUID), len(l.lockMap))
if len(l.lockMap) != 0 {
t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockMap))
}
if len(l.lockUID) != 0 {
t.Fatalf("objects not deleted, want %d != got %d", 0, len(l.lockUID))
}
})
}
})
}
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/lithammer/shortuuid/v4"
"github.com/minio/madmin-go/v3"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
"github.com/minio/pkg/env"
iampolicy "github.com/minio/pkg/iam/policy"
"github.com/minio/pkg/wildcard"
"github.com/minio/pkg/workers"
"gopkg.in/yaml.v2"
)
// replicate:
// # source of the objects to be replicated
// source:
// type: "minio"
// bucket: "testbucket"
// prefix: "spark/"
//
// # optional flags based filtering criteria
// # for source objects
// flags:
// filter:
// newerThan: "7d"
// olderThan: "7d"
// createdAfter: "date"
// createdBefore: "date"
// tags:
// - key: "name"
// value: "value*"
// metadata:
// - key: "content-type"
// value: "image/*"
// notify:
// endpoint: "https://splunk-hec.dev.com"
// token: "Splunk ..." # e.g. "Bearer token"
//
// # target where the objects must be replicated
// target:
// type: "minio"
// bucket: "testbucket1"
// endpoint: "https://play.min.io"
// path: "on"
// credentials:
// accessKey: "minioadmin"
// secretKey: "minioadmin"
// sessionToken: ""
// BatchJobReplicateKV is a datatype that holds key and values for filtering of objects
// used by metadata filter as well as tags based filtering.
type BatchJobReplicateKV struct {
Key string `yaml:"key" json:"key"`
Value string `yaml:"value" json:"value"`
}
// Validate returns an error if key is empty
func (kv BatchJobReplicateKV) Validate() error {
if kv.Key == "" {
return errInvalidArgument
}
return nil
}
// Empty indicates if kv is not set
func (kv BatchJobReplicateKV) Empty() bool {
return kv.Key == "" && kv.Value == ""
}
// Match matches input kv with kv, value will be wildcard matched depending on the user input
func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool {
if kv.Empty() {
return true
}
if strings.EqualFold(kv.Key, ikv.Key) {
return wildcard.Match(kv.Value, ikv.Value)
}
return false
}
// BatchReplicateRetry datatype represents total retry attempts and the delay between retries.
type BatchReplicateRetry struct {
Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
Delay time.Duration `yaml:"delay" json:"delay"` // delay between retries
}
// Validate validates input replicate retries.
func (r BatchReplicateRetry) Validate() error {
if r.Attempts < 0 {
return errInvalidArgument
}
if r.Delay < 0 {
return errInvalidArgument
}
return nil
}
// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobReplicateKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobReplicateKV `yaml:"metadata,omitempty" json:"metadata"`
}
// BatchReplicateNotification success or failure notification endpoint for each job attempts
type BatchReplicateNotification struct {
Endpoint string `yaml:"endpoint" json:"endpoint"`
Token string `yaml:"token" json:"token"`
}
// BatchJobReplicateFlags various configurations for replication job definition currently includes
// - filter
// - notify
// - retry
type BatchJobReplicateFlags struct {
Filter BatchReplicateFilter `yaml:"filter" json:"filter"`
Notify BatchReplicateNotification `yaml:"notify" json:"notify"`
Retry BatchReplicateRetry `yaml:"retry" json:"retry"`
}
// BatchJobReplicateResourceType defines the type of batch jobs
type BatchJobReplicateResourceType string
// Validate validates if the replicate resource type is recognized and supported
func (t BatchJobReplicateResourceType) Validate() error {
switch t {
case BatchJobReplicateResourceMinIO:
case BatchJobReplicateResourceS3:
default:
return errInvalidArgument
}
return nil
}
func (t BatchJobReplicateResourceType) isMinio() bool {
return t == BatchJobReplicateResourceMinIO
}
// Different types of batch jobs..
const (
BatchJobReplicateResourceMinIO BatchJobReplicateResourceType = "minio"
BatchJobReplicateResourceS3 BatchJobReplicateResourceType = "s3"
// add future targets
)
// BatchJobReplicateCredentials access credentials for batch replication it may
// be either for target or source.
type BatchJobReplicateCredentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
}
// Empty indicates if credentials are not set
func (c BatchJobReplicateCredentials) Empty() bool {
return c.AccessKey == "" && c.SecretKey == "" && c.SessionToken == ""
}
// Validate validates if credentials are valid
func (c BatchJobReplicateCredentials) Validate() error {
if !auth.IsAccessKeyValid(c.AccessKey) || !auth.IsSecretKeyValid(c.SecretKey) {
return errInvalidArgument
}
return nil
}
// BatchJobReplicateTarget describes target element of the replication job that receives
// the filtered data from source
type BatchJobReplicateTarget struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
// ValidPath returns true if path is valid
func (t BatchJobReplicateTarget) ValidPath() bool {
return t.Path == "on" || t.Path == "off" || t.Path == "auto" || t.Path == ""
}
// BatchJobReplicateSource describes source element of the replication job that is
// the source of the data for the target
type BatchJobReplicateSource struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
// ValidPath returns true if path is valid
func (s BatchJobReplicateSource) ValidPath() bool {
switch s.Path {
case "on", "off", "auto", "":
return true
default:
return false
}
}
// BatchJobReplicateV1 v1 of batch job replication
type BatchJobReplicateV1 struct {
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Flags BatchJobReplicateFlags `yaml:"flags" json:"flags"`
Target BatchJobReplicateTarget `yaml:"target" json:"target"`
Source BatchJobReplicateSource `yaml:"source" json:"source"`
clnt *miniogo.Core `msg:"-"`
}
// RemoteToLocal returns true if source is remote and target is local
func (r BatchJobReplicateV1) RemoteToLocal() bool {
return !r.Source.Creds.Empty()
}
// BatchJobRequest this is an internal data structure not for external consumption.
type BatchJobRequest struct {
ID string `yaml:"-" json:"name"`
User string `yaml:"-" json:"user"`
Started time.Time `yaml:"-" json:"started"`
Location string `yaml:"-" json:"location"`
Replicate *BatchJobReplicateV1 `yaml:"replicate" json:"replicate"`
KeyRotate *BatchJobKeyRotateV1 `yaml:"keyrotate" json:"keyrotate"`
ctx context.Context `msg:"-"`
}
// Notify notifies notification endpoint if configured regarding job failure or success.
func (r BatchJobReplicateV1) Notify(ctx context.Context, body io.Reader) error {
if r.Flags.Notify.Endpoint == "" {
return nil
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)
if err != nil {
return err
}
if r.Flags.Notify.Token != "" {
req.Header.Set("Authorization", r.Flags.Notify.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport}
resp, err := clnt.Do(req)
if err != nil {
return err
}
xhttp.DrainBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return nil
}
// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local.
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name
tgtObject := srcObjInfo.Name
if r.Target.Prefix != "" {
tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
}
versioned := globalBucketVersioningSys.PrefixEnabled(tgtBucket, tgtObject)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(tgtBucket, tgtObject)
versionID := srcObjInfo.VersionID
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
versionID = ""
}
if srcObjInfo.DeleteMarker {
_, err := api.DeleteObject(ctx, tgtBucket, tgtObject, ObjectOptions{
VersionID: versionID,
VersionSuspended: versionSuspended,
Versioned: versioned,
MTime: srcObjInfo.ModTime,
DeleteMarker: srcObjInfo.DeleteMarker,
ReplicationRequest: true,
})
return err
}
opts := ObjectOptions{
VersionID: srcObjInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
MTime: srcObjInfo.ModTime,
PreserveETag: srcObjInfo.ETag,
UserDefined: srcObjInfo.UserDefined,
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
opts.VersionID = ""
}
if crypto.S3.IsEncrypted(srcObjInfo.UserDefined) {
opts.ServerSideEncryption = encrypt.NewSSE()
}
slc := strings.Split(srcObjInfo.ETag, "-")
if len(slc) == 2 {
partsCount, err := strconv.Atoi(slc[1])
if err != nil {
return err
}
return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount)
}
gopts := miniogo.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
return err
}
rd, objInfo, _, err := core.GetObject(ctx, srcBucket, srcObject, gopts)
if err != nil {
return err
}
defer rd.Close()
hr, err := hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
pReader := NewPutObjReader(hr)
_, err = api.PutObject(ctx, tgtBucket, tgtObject, pReader, opts)
return err
}
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name
tgtObject := srcObjInfo.Name
if r.Target.Prefix != "" {
tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
opts.VersionID = ""
}
var uploadedParts []CompletePart
res, err := api.NewMultipartUpload(context.Background(), tgtBucket, tgtObject, opts)
if err != nil {
return err
}
defer func() {
if err != nil {
// block and abort remote upload upon failure.
attempts := 1
for attempts <= 3 {
aerr := api.AbortMultipartUpload(ctx, tgtBucket, tgtObject, res.UploadID, ObjectOptions{})
if aerr == nil {
return
}
logger.LogIf(ctx,
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr))
attempts++
time.Sleep(time.Second)
}
}
}()
var (
hr *hash.Reader
pInfo PartInfo
)
for i := 0; i < partsCount; i++ {
gopts := miniogo.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,
}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
return err
}
rd, objInfo, _, err := c.GetObject(ctx, srcBucket, srcObject, gopts)
if err != nil {
return err
}
defer rd.Close()
hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
pReader := NewPutObjReader(hr)
opts.PreserveETag = ""
pInfo, err = api.PutObjectPart(ctx, tgtBucket, tgtObject, res.UploadID, i+1, pReader, opts)
if err != nil {
return err
}
if pInfo.Size != objInfo.Size {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, objInfo.Size)
}
uploadedParts = append(uploadedParts, CompletePart{
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
})
}
_, err = api.CompleteMultipartUpload(ctx, tgtBucket, tgtObject, res.UploadID, uploadedParts, opts)
return err
}
// StartFromSource starts the batch replication job from remote source, resumes if there was a pending job via "job.ID"
func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
delay := job.Replicate.Flags.Retry.Delay
if delay == 0 {
delay = batchReplJobDefaultRetryDelay
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
skip := func(oi ObjectInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
// skip all objects that are newer than specified older duration
return true
}
if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan {
// skip all objects that are older than specified newer duration
return true
}
if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(oi.ModTime) {
// skip all objects that are created before the specified time.
return true
}
if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(oi.ModTime) {
// skip all objects that are created after the specified time.
return true
}
if len(r.Flags.Filter.Tags) > 0 {
// Only parse object tags if tags filter is specified.
tagMap := map[string]string{}
tagStr := oi.UserTags
if len(tagStr) != 0 {
t, err := tags.ParseObjectTags(tagStr)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range r.Flags.Filter.Tags {
for t, v := range tagMap {
if kv.Match(BatchJobReplicateKV{Key: t, Value: v}) {
return true
}
}
}
// None of the provided tags filter match skip the object
return false
}
if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata {
for k, v := range oi.UserDefined {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchJobReplicateKV{Key: k, Value: v}) {
return true
}
}
}
// None of the provided metadata filters match skip the object.
return false
}
return false
}
u, err := url.Parse(r.Source.Endpoint)
if err != nil {
return err
}
cred := r.Source.Creds
c, err := miniogo.New(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
BucketLookup: lookupStyle(r.Source.Path),
})
if err != nil {
return err
}
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
core := &miniogo.Core{Client: c}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
retryAttempts := ri.RetryAttempts
retry := false
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
// one of source/target is s3, skip delete marker and all versions under the same object name.
s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
ctx, cancel := context.WithCancel(ctx)
objInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
Prefix: r.Source.Prefix,
WithVersions: minioSrc,
Recursive: true,
WithMetadata: true,
})
prevObj := ""
skipReplicate := false
for obj := range objInfoCh {
oi := toObjectInfo(r.Source.Bucket, obj.Key, obj)
if !minioSrc {
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
if err == nil {
oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
} else {
if isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) ||
isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
continue
}
logger.LogIf(ctx, err)
cancel()
return err
}
}
if skip(oi) {
continue
}
if obj.Key != prevObj {
prevObj = obj.Key
// skip replication of delete marker and all versions under the same object name if one of source or target is s3.
skipReplicate = obj.IsDeleteMarker && s3Type
}
if skipReplicate {
continue
}
wk.Take()
go func() {
defer wk.Give()
stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, oi)
success := true
if err := r.ReplicateFromSource(ctx, api, core, oi, retry); err != nil {
// object must be deleted concurrently, allow these failures but do not count them
if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
return
}
stopFn(err)
logger.LogIf(ctx, err)
success = false
} else {
stopFn(nil)
}
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk every 10 seconds.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
}()
}
wk.Wait()
ri.RetryAttempts = attempts
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
buf, _ := json.Marshal(ri)
if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
if ri.Failed {
ri.ObjectsFailed = 0
ri.Bucket = ""
ri.Object = ""
ri.Objects = 0
ri.BytesFailed = 0
ri.BytesTransferred = 0
retry = true // indicate we are retrying..
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
continue
}
break
}
return nil
}
// toObjectInfo converts minio.ObjectInfo to ObjectInfo
func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo {
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi := ObjectInfo{
Bucket: bucket,
Name: object,
ModTime: objInfo.LastModified,
Size: objInfo.Size,
ETag: objInfo.ETag,
VersionID: objInfo.VersionID,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.IsDeleteMarker,
ContentType: objInfo.ContentType,
Expires: objInfo.Expires,
StorageClass: objInfo.StorageClass,
ReplicationStatusInternal: objInfo.ReplicationStatus,
UserTags: tags.String(),
}
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
for k, v := range objInfo.Metadata {
oi.UserDefined[k] = v[0]
}
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
if !ok {
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
}
if ok {
oi.ContentEncoding = ce
}
return oi
}
// ReplicateToTarget reads from the source and replicates to the configured target
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
tgtPrefix := r.Target.Prefix
srcObject := srcObjInfo.Name
s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() {
if retry && !s3Type {
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{
VersionID: srcObjInfo.VersionID,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
}); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) {
return nil
}
}
versionID := srcObjInfo.VersionID
dmVersionID := ""
if srcObjInfo.VersionPurgeStatus.Empty() {
dmVersionID = srcObjInfo.VersionID
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
dmVersionID = ""
versionID = ""
}
return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
ReplicationDeleteMarker: dmVersionID != "",
ReplicationMTime: srcObjInfo.ModTime,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
}
if retry && !s3Type { // when we are retrying avoid copying if necessary.
gopts := miniogo.GetObjectOptions{}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
return err
}
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), gopts); err == nil {
return nil
}
}
versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
opts := ObjectOptions{
VersionID: srcObjInfo.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
}
rd, err := api.GetObjectNInfo(ctx, srcBucket, srcObject, nil, http.Header{}, opts)
if err != nil {
return err
}
defer rd.Close()
objInfo := rd.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
return err
}
putOpts, err := batchReplicationOpts(ctx, "", objInfo)
if err != nil {
return err
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
putOpts.Internal = miniogo.AdvancedPutOptions{}
}
if objInfo.isMultipart() {
if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil {
return err
}
} else {
if _, err = c.PutObject(ctx, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, size, "", "", putOpts); err != nil {
return err
}
}
return nil
}
//go:generate msgp -file $GOFILE -unexported
// batchJobInfo tracks the current status of a batch job (replication or key rotation)
type batchJobInfo struct {
mu sync.RWMutex `json:"-" msg:"-"`
Version int `json:"-" msg:"v"`
JobID string `json:"jobID" msg:"jid"`
JobType string `json:"jobType" msg:"jt"`
StartTime time.Time `json:"startTime" msg:"st"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
RetryAttempts int `json:"retryAttempts" msg:"ra"`
Complete bool `json:"complete" msg:"cmp"`
Failed bool `json:"failed" msg:"fld"`
// Last bucket/object batch replicated
Bucket string `json:"-" msg:"lbkt"`
Object string `json:"-" msg:"lobj"`
// Verbose information
Objects int64 `json:"objects" msg:"ob"`
DeleteMarkers int64 `json:"deleteMarkers" msg:"dm"`
ObjectsFailed int64 `json:"objectsFailed" msg:"obf"`
DeleteMarkersFailed int64 `json:"deleteMarkersFailed" msg:"dmf"`
BytesTransferred int64 `json:"bytesTransferred" msg:"bt"`
BytesFailed int64 `json:"bytesFailed" msg:"bf"`
}
const (
batchReplName = "batch-replicate.bin"
batchReplFormat = 1
batchReplVersionV1 = 1
batchReplVersion = batchReplVersionV1
batchJobName = "job.bin"
batchJobPrefix = "batch-jobs"
batchReplJobAPIVersion = "v1"
batchReplJobDefaultRetries = 3
batchReplJobDefaultRetryDelay = 250 * time.Millisecond
)
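// load reads any previously persisted status for this job from the backend;
// if none exists yet, it initializes the version and default retry attempts.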
func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
var fileName string
var format, version uint16
switch {
case job.Replicate != nil:
fileName = batchReplName
version = batchReplVersionV1
format = batchReplFormat
case job.KeyRotate != nil:
fileName = batchKeyRotationName
version = batchKeyRotateVersionV1
format = batchKeyRotationFormat
}
data, err := readConfig(ctx, api, pathJoin(job.Location, fileName))
if err != nil {
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
ri.Version = int(version)
switch {
case job.Replicate != nil:
ri.RetryAttempts = batchReplJobDefaultRetries
if job.Replicate.Flags.Retry.Attempts > 0 {
ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts
}
case job.KeyRotate != nil:
ri.RetryAttempts = batchKeyRotateJobDefaultRetries
if job.KeyRotate.Flags.Retry.Attempts > 0 {
ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
}
}
return nil
}
return err
}
if len(data) == 0 {
// Seems to be empty, create a new batchRepl object.
return nil
}
if len(data) <= 4 {
return fmt.Errorf("%s: no data", ri.JobType)
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case format:
default:
return fmt.Errorf("%s: unknown format: %d", ri.JobType, binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case version:
default:
return fmt.Errorf("%s: unknown version: %d", ri.JobType, binary.LittleEndian.Uint16(data[2:4]))
}
ri.mu.Lock()
defer ri.mu.Unlock()
// OK, parse data.
if _, err = ri.UnmarshalMsg(data[4:]); err != nil {
return err
}
switch ri.Version {
case batchReplVersionV1:
default:
return fmt.Errorf("unexpected batch %s meta version: %d", ri.JobType, ri.Version)
}
return nil
}
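// clone returns a copy of the in-memory job info, taken under a read lock.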
func (ri *batchJobInfo) clone() *batchJobInfo {
ri.mu.RLock()
defer ri.mu.RUnlock()
return &batchJobInfo{
Version: ri.Version,
JobID: ri.JobID,
JobType: ri.JobType,
RetryAttempts: ri.RetryAttempts,
Complete: ri.Complete,
Failed: ri.Failed,
StartTime: ri.StartTime,
LastUpdate: ri.LastUpdate,
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
}
}
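// countItem updates the success or failure counters for a single object or delete marker.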
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
if ri == nil {
return
}
if success {
if dmarker {
ri.DeleteMarkers++
} else {
ri.Objects++
ri.BytesTransferred += size
}
} else {
if dmarker {
ri.DeleteMarkersFailed++
} else {
ri.ObjectsFailed++
ri.BytesFailed += size
}
}
}
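// updateAfter persists the in-memory job status to the backend when at least
// 'duration' has elapsed since the last update.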
func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, duration time.Duration, job BatchJobRequest) error {
if ri == nil {
return errInvalidArgument
}
now := UTCNow()
ri.mu.Lock()
var (
format, version uint16
jobTyp, fileName string
)
if now.Sub(ri.LastUpdate) >= duration {
switch job.Type() {
case madmin.BatchJobReplicate:
format = batchReplFormat
version = batchReplVersion
jobTyp = string(job.Type())
fileName = batchReplName
ri.Version = batchReplVersionV1
case madmin.BatchJobKeyRotate:
format = batchKeyRotationFormat
version = batchKeyRotateVersion
jobTyp = string(job.Type())
fileName = batchKeyRotationName
ri.Version = batchKeyRotateVersionV1
default:
return errInvalidArgument
}
if serverDebugLog {
console.Debugf("%s: persisting info on drive: threshold:%s, %s:%#v\n", jobTyp, now.Sub(ri.LastUpdate), jobTyp, ri)
}
ri.LastUpdate = now
data := make([]byte, 4, ri.Msgsize()+4)
// Initialize the header.
binary.LittleEndian.PutUint16(data[0:2], format)
binary.LittleEndian.PutUint16(data[2:4], version)
buf, err := ri.MarshalMsg(data)
ri.mu.Unlock()
if err != nil {
return err
}
return saveConfig(ctx, api, pathJoin(job.Location, fileName), buf)
}
ri.mu.Unlock()
return nil
}
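// trackCurrentBucketObject records the last bucket/object seen and updates the job counters.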
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
if ri == nil {
return
}
ri.mu.Lock()
defer ri.mu.Unlock()
ri.Bucket = bucket
ri.Object = info.Name
	ri.countItem(info.Size, info.DeleteMarker, success)
}
// Start starts the batch replication job, resuming any pending job via "job.ID"
func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
ri := &batchJobInfo{
JobID: job.ID,
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
return err
}
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object
delay := job.Replicate.Flags.Retry.Delay
if delay == 0 {
delay = batchReplJobDefaultRetryDelay
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
skip := func(info FileInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
// skip all objects that are newer than specified older duration
return false
}
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
// skip all objects that are older than specified newer duration
return false
}
if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
// skip all objects that are created before the specified time.
return false
}
if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
// skip all objects that are created after the specified time.
return false
}
if len(r.Flags.Filter.Tags) > 0 {
// Only parse object tags if tags filter is specified.
tagMap := map[string]string{}
tagStr := info.Metadata[xhttp.AmzObjectTagging]
if len(tagStr) != 0 {
t, err := tags.ParseObjectTags(tagStr)
if err != nil {
return false
}
tagMap = t.ToMap()
}
for _, kv := range r.Flags.Filter.Tags {
for t, v := range tagMap {
if kv.Match(BatchJobReplicateKV{Key: t, Value: v}) {
return true
}
}
}
// None of the provided tag filters matched; skip the object
return false
}
if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
continue
}
// We only need to match x-amz-meta or standardHeaders
if kv.Match(BatchJobReplicateKV{Key: k, Value: v}) {
return true
}
}
}
// None of the provided metadata filters matched; skip the object.
return false
}
// if one of source or target is non-MinIO, just replicate the topmost version like `mc mirror`
if (r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest {
return false
}
return true
}
u, err := url.Parse(r.Target.Endpoint)
if err != nil {
return err
}
cred := r.Target.Creds
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
return err
}
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
return err
}
wk, err := workers.New(workerSize)
if err != nil {
// invalid worker size.
return err
}
retryAttempts := ri.RetryAttempts
retry := false
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
ctx, cancel := context.WithCancel(ctx)
// one of source/target is s3, skip delete marker and all versions under the same object name.
s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
results := make(chan ObjectInfo, 100)
if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, ObjectOptions{
WalkMarker: lastObject,
WalkFilter: skip,
}); err != nil {
cancel()
// Do not need to retry if we can't list objects on source.
return err
}
prevObj := ""
skipReplicate := false
for result := range results {
result := result
if result.Name != prevObj {
prevObj = result.Name
skipReplicate = result.DeleteMarker && s3Type
}
if skipReplicate {
continue
}
wk.Take()
go func() {
defer wk.Give()
stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, result)
success := true
if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
// pre-condition failed means we already have the object copied over.
return
}
// object must be deleted concurrently, allow these failures but do not count them
if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
return
}
stopFn(err)
logger.LogIf(ctx, err)
success = false
} else {
stopFn(nil)
}
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk every 10 seconds.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
}()
}
wk.Wait()
ri.RetryAttempts = attempts
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
buf, _ := json.Marshal(ri)
if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
if ri.Failed {
ri.ObjectsFailed = 0
ri.Bucket = ""
ri.Object = ""
ri.Objects = 0
ri.BytesFailed = 0
ri.BytesTransferred = 0
retry = true // indicate we are retrying..
time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
continue
}
break
}
return nil
}
//msgp:ignore batchReplicationJobError
type batchReplicationJobError struct {
Code string
Description string
HTTPStatusCode int
}
func (e batchReplicationJobError) Error() string {
return e.Description
}
// Validate validates the job definition input
func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
if r == nil {
return nil
}
if r.APIVersion != batchReplJobAPIVersion {
return errInvalidArgument
}
if r.Source.Bucket == "" {
return errInvalidArgument
}
var isRemoteToLocal bool
localBkt := r.Source.Bucket
if r.Source.Endpoint != "" {
localBkt = r.Target.Bucket
isRemoteToLocal = true
}
info, err := o.GetBucketInfo(ctx, localBkt, BucketOptions{})
if err != nil {
if isErrBucketNotFound(err) {
return batchReplicationJobError{
Code: "NoSuchSourceBucket",
Description: fmt.Sprintf("The specified bucket %s does not exist", localBkt),
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
if err := r.Source.Type.Validate(); err != nil {
return err
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if !r.Source.Creds.Empty() {
if err := r.Source.Creds.Validate(); err != nil {
return err
}
}
if r.Target.Endpoint == "" && !r.Target.Creds.Empty() {
return errInvalidArgument
}
if r.Source.Endpoint == "" && !r.Source.Creds.Empty() {
return errInvalidArgument
}
if r.Source.Endpoint != "" && !r.Source.Type.isMinio() && !r.Source.ValidPath() {
return errInvalidArgument
}
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
return errInvalidArgument
}
if r.Target.Bucket == "" {
return errInvalidArgument
}
if !r.Target.Creds.Empty() {
if err := r.Target.Creds.Validate(); err != nil {
return err
}
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if err := r.Target.Type.Validate(); err != nil {
return err
}
for _, tag := range r.Flags.Filter.Tags {
if err := tag.Validate(); err != nil {
return err
}
}
for _, meta := range r.Flags.Filter.Metadata {
if err := meta.Validate(); err != nil {
return err
}
}
if err := r.Flags.Retry.Validate(); err != nil {
return err
}
remoteEp := r.Target.Endpoint
remoteBkt := r.Target.Bucket
cred := r.Target.Creds
pathStyle := r.Target.Path
if r.Source.Endpoint != "" {
remoteEp = r.Source.Endpoint
cred = r.Source.Creds
remoteBkt = r.Source.Bucket
pathStyle = r.Source.Path
}
u, err := url.Parse(remoteEp)
if err != nil {
return err
}
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
BucketLookup: lookupStyle(pathStyle),
})
if err != nil {
return err
}
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
vcfg, err := c.GetBucketVersioning(ctx, remoteBkt)
if err != nil {
if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" {
return batchReplicationJobError{
Code: "NoSuchTargetBucket",
Description: "The specified target bucket does not exist",
HTTPStatusCode: http.StatusNotFound,
}
}
return err
}
// if both source and target are minio instances
minioType := r.Target.Type == BatchJobReplicateResourceMinIO && r.Source.Type == BatchJobReplicateResourceMinIO
// If source has versioning enabled, target must have versioning enabled
if minioType && ((info.Versioning && !vcfg.Enabled() && !isRemoteToLocal) || (!info.Versioning && vcfg.Enabled() && isRemoteToLocal)) {
return batchReplicationJobError{
Code: "InvalidBucketState",
Description: fmt.Sprintf("The source '%s' has versioning enabled, target '%s' must have versioning enabled",
r.Source.Bucket, r.Target.Bucket),
HTTPStatusCode: http.StatusBadRequest,
}
}
r.clnt = c
return nil
}
// Type returns the type of batch job, currently 'replicate' or 'keyrotate'
func (j BatchJobRequest) Type() madmin.BatchJobType {
switch {
case j.Replicate != nil:
return madmin.BatchJobReplicate
case j.KeyRotate != nil:
return madmin.BatchJobKeyRotate
}
return madmin.BatchJobType("unknown")
}
// Validate validates the current job, used by 'save()' before
// persisting the job request
func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {
switch {
case j.Replicate != nil:
return j.Replicate.Validate(ctx, j, o)
case j.KeyRotate != nil:
return j.KeyRotate.Validate(ctx, j, o)
}
return errInvalidArgument
}
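// delete removes the persisted job status and definition and drops its in-memory metrics.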
func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) {
switch {
case j.Replicate != nil:
deleteConfig(ctx, api, pathJoin(j.Location, batchReplName))
case j.KeyRotate != nil:
deleteConfig(ctx, api, pathJoin(j.Location, batchKeyRotationName))
}
globalBatchJobsMetrics.delete(j.ID)
deleteConfig(ctx, api, j.Location)
}
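// save validates the job request and persists it under the batch jobs prefix.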
func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error {
if j.Replicate == nil && j.KeyRotate == nil {
return errInvalidArgument
}
if err := j.Validate(ctx, api); err != nil {
return err
}
j.Location = pathJoin(batchJobPrefix, j.ID)
job, err := j.MarshalMsg(nil)
if err != nil {
return err
}
return saveConfig(ctx, api, j.Location, job)
}
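// load reads a persisted job definition by name, returning errNoSuchJob if it is missing.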
func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string) error {
if j == nil {
return nil
}
job, err := readConfig(ctx, api, name)
if err != nil {
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
err = errNoSuchJob
}
return err
}
_, err = j.UnmarshalMsg(job)
return err
}
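// batchReplicationOpts returns the PUT options used when replicating objInfo to the remote target.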
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
// TODO: support custom storage class for remote replication
putOpts, err = putReplicationOpts(ctx, "", objInfo)
if err != nil {
return putOpts, err
}
putOpts.Internal = miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
ReplicationRequest: true,
}
return putOpts, nil
}
// ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType}
// input to list only active batch jobs of 'jobType'
func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBatchJobs")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListBatchJobsAction)
if objectAPI == nil {
return
}
jobType := r.Form.Get("jobType")
if jobType == "" {
jobType = string(madmin.BatchJobReplicate)
}
resultCh := make(chan ObjectInfo)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobPrefix, resultCh, ObjectOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
listResult := madmin.ListBatchJobsResult{}
for result := range resultCh {
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Name); err != nil {
if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err)
}
continue
}
if jobType == string(req.Type()) {
listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{
ID: req.ID,
Type: req.Type(),
Started: req.Started,
User: req.User,
Elapsed: time.Since(req.Started),
})
}
}
logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult))
}
var errNoSuchJob = errors.New("no such job")
// DescribeBatchJob returns the currently active batch job definition
func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DescribeBatchJob")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DescribeBatchJobAction)
if objectAPI == nil {
return
}
id := r.Form.Get("jobId")
if id == "" {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
return
}
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, id)); err != nil {
if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err)
}
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buf, err := yaml.Marshal(req)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.Write(buf)
}
// StartBatchJob queues a new job for execution
func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StartBatchJob")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, creds := validateAdminReq(ctx, w, r, iampolicy.StartBatchJobAction)
if objectAPI == nil {
return
}
buf, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
user := creds.AccessKey
if creds.ParentUser != "" {
user = creds.ParentUser
}
job := &BatchJobRequest{}
if err = yaml.Unmarshal(buf, job); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
job.ID = shortuuid.New()
job.User = user
job.Started = time.Now()
if err := job.save(ctx, objectAPI); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBatchJobPool.queueJob(job); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buf, err = json.Marshal(&madmin.BatchJobResult{
ID: job.ID,
Type: job.Type(),
Started: job.Started,
User: job.User,
})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, buf)
}
// CancelBatchJob cancels a job in progress
func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CancelBatchJob")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.CancelBatchJobAction)
if objectAPI == nil {
return
}
jobID := r.Form.Get("id")
if jobID == "" {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
return
}
if err := globalBatchJobPool.canceler(jobID, true); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}
j := BatchJobRequest{
ID: jobID,
Location: pathJoin(batchJobPrefix, jobID),
}
j.delete(ctx, objectAPI)
writeSuccessNoContent(w)
}
//msgp:ignore BatchJobPool
// BatchJobPool manages a pool of workers that execute queued batch jobs
type BatchJobPool struct {
ctx context.Context
objLayer ObjectLayer
once sync.Once
mu sync.Mutex
jobCh chan *BatchJobRequest
jmu sync.Mutex // protects jobCancelers
jobCancelers map[string]context.CancelFunc
workerKillCh chan struct{}
workerSize int
}
var globalBatchJobPool *BatchJobPool
// newBatchJobPool creates a pool of job manifest workers of specified size
func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobPool {
jpool := &BatchJobPool{
ctx: ctx,
objLayer: o,
jobCh: make(chan *BatchJobRequest, 10000),
workerKillCh: make(chan struct{}, workers),
jobCancelers: make(map[string]context.CancelFunc),
}
jpool.ResizeWorkers(workers)
jpool.resume()
return jpool
}
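// resume walks the batch jobs prefix and re-queues any persisted job definitions,
// e.g. after a server restart.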
func (j *BatchJobPool) resume() {
results := make(chan ObjectInfo, 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, ObjectOptions{}); err != nil {
logger.LogIf(j.ctx, err)
return
}
for result := range results {
// ignore batch-replicate.bin and batch-rotate.bin entries
if strings.HasSuffix(result.Name, slashSeparator) {
continue
}
req := &BatchJobRequest{}
if err := req.load(ctx, j.objLayer, result.Name); err != nil {
logger.LogIf(ctx, err)
continue
}
if err := j.queueJob(req); err != nil {
logger.LogIf(ctx, err)
continue
}
}
}
// AddWorker adds a replication worker to the pool
func (j *BatchJobPool) AddWorker() {
if j == nil {
return
}
for {
select {
case <-j.ctx.Done():
return
case job, ok := <-j.jobCh:
if !ok {
return
}
if job.Replicate != nil {
if job.Replicate.RemoteToLocal() {
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
j.canceler(job.ID, false)
continue
}
// Bucket not found; proceed to delete such a job.
}
} else {
if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
j.canceler(job.ID, false)
continue
}
// Bucket not found; proceed to delete such a job.
}
}
}
if job.KeyRotate != nil {
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
continue
}
}
}
job.delete(j.ctx, j.objLayer)
j.canceler(job.ID, false)
case <-j.workerKillCh:
return
}
}
}
// ResizeWorkers sets replication workers pool to new size
func (j *BatchJobPool) ResizeWorkers(n int) {
if j == nil {
return
}
j.mu.Lock()
defer j.mu.Unlock()
for j.workerSize < n {
j.workerSize++
go j.AddWorker()
}
for j.workerSize > n {
j.workerSize--
go func() { j.workerKillCh <- struct{}{} }()
}
}
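// queueJob registers a cancel function for the job and queues it for execution;
// it fails if the job queue is currently full.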
func (j *BatchJobPool) queueJob(req *BatchJobRequest) error {
if j == nil {
return errInvalidArgument
}
jctx, jcancel := context.WithCancel(j.ctx)
j.jmu.Lock()
j.jobCancelers[req.ID] = jcancel
j.jmu.Unlock()
req.ctx = jctx
select {
case <-j.ctx.Done():
j.once.Do(func() {
close(j.jobCh)
})
case j.jobCh <- req:
default:
return fmt.Errorf("batch job queue is currently full please try again later %#v", req)
}
return nil
}
// delete canceler from the map, cancel job if requested
func (j *BatchJobPool) canceler(jobID string, cancel bool) error {
if j == nil {
return errInvalidArgument
}
j.jmu.Lock()
defer j.jmu.Unlock()
if canceler, ok := j.jobCancelers[jobID]; ok {
if cancel {
canceler()
}
}
delete(j.jobCancelers, jobID)
return nil
}
//msgp:ignore batchJobMetrics
type batchJobMetrics struct {
sync.RWMutex
metrics map[string]*batchJobInfo
}
var globalBatchJobsMetrics = batchJobMetrics{
metrics: make(map[string]*batchJobInfo),
}
//msgp:ignore batchJobMetric
//go:generate stringer -type=batchJobMetric -trimprefix=batchJobMetric $GOFILE
type batchJobMetric uint8
const (
batchReplicationMetricObject batchJobMetric = iota
batchKeyRotationMetricObject
)
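// batchJobTrace builds a trace event for a single batch replication or key rotation operation.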
func batchJobTrace(d batchJobMetric, job string, startTime time.Time, duration time.Duration, info ObjectInfo, attempts int, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
jobKind := "batchReplication"
traceType := madmin.TraceBatchReplication
if d == batchKeyRotationMetricObject {
jobKind = "batchKeyRotation"
traceType = madmin.TraceBatchKeyRotation
}
funcName := fmt.Sprintf("%s.%s (job-name=%s)", jobKind, d.String(), job)
if attempts > 0 {
funcName = fmt.Sprintf("%s.%s (job-name=%s,attempts=%s)", jobKind, d.String(), job, humanize.Ordinal(attempts))
}
return madmin.TraceInfo{
TraceType: traceType,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: funcName,
Duration: duration,
Path: info.Name,
Error: errStr,
}
}
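// report returns a point-in-time snapshot of metrics for the tracked jobs; when jobID is
// non-empty, iteration stops once that job has been included.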
func (m *batchJobMetrics) report(jobID string) (metrics *madmin.BatchJobMetrics) {
metrics = &madmin.BatchJobMetrics{CollectedAt: time.Now(), Jobs: make(map[string]madmin.JobMetric)}
m.RLock()
defer m.RUnlock()
for id, job := range m.metrics {
match := jobID != "" && id == jobID
metrics.Jobs[id] = madmin.JobMetric{
JobID: job.JobID,
JobType: job.JobType,
StartTime: job.StartTime,
LastUpdate: job.LastUpdate,
RetryAttempts: job.RetryAttempts,
Complete: job.Complete,
Failed: job.Failed,
Replicate: &madmin.ReplicateInfo{
Bucket: job.Bucket,
Object: job.Object,
Objects: job.Objects,
ObjectsFailed: job.ObjectsFailed,
BytesTransferred: job.BytesTransferred,
BytesFailed: job.BytesFailed,
},
KeyRotate: &madmin.KeyRotationInfo{
Bucket: job.Bucket,
Object: job.Object,
Objects: job.Objects,
ObjectsFailed: job.ObjectsFailed,
},
}
if match {
break
}
}
return metrics
}
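// delete removes the in-memory metrics tracked for the given job ID.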
func (m *batchJobMetrics) delete(jobID string) {
m.Lock()
defer m.Unlock()
delete(m.metrics, jobID)
}
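// save stores a cloned snapshot of the job info for the given job ID.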
func (m *batchJobMetrics) save(jobID string, ri *batchJobInfo) {
m.Lock()
defer m.Unlock()
m.metrics[jobID] = ri.clone()
}
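// trace returns a completion callback that publishes a trace event for the
// operation when there are trace subscribers.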
func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int, info ObjectInfo) func(err error) {
startTime := time.Now()
return func(err error) {
duration := time.Since(startTime)
switch d {
case batchReplicationMetricObject:
if globalTrace.NumSubscribers(madmin.TraceBatchReplication) > 0 {
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
}
case batchKeyRotationMetricObject:
if globalTrace.NumSubscribers(madmin.TraceBatchKeyRotation) > 0 {
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
}
}
}
}
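// lookupStyle maps the configured path style ("on"/"off") to the corresponding
// bucket lookup type, defaulting to auto-detection.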
func lookupStyle(s string) miniogo.BucketLookupType {
var lookup miniogo.BucketLookupType
switch s {
case "on":
lookup = miniogo.BucketLookupPath
case "off":
lookup = miniogo.BucketLookupDNS
default:
lookup = miniogo.BucketLookupAuto
}
return lookup
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/env"
)
//go:generate stringer -type=storageMetric -trimprefix=storageMetric $GOFILE
type storageMetric uint8
const (
storageMetricMakeVolBulk storageMetric = iota
storageMetricMakeVol
storageMetricListVols
storageMetricStatVol
storageMetricDeleteVol
storageMetricWalkDir
storageMetricListDir
storageMetricReadFile
storageMetricAppendFile
storageMetricCreateFile
storageMetricReadFileStream
storageMetricRenameFile
storageMetricRenameData
storageMetricCheckParts
storageMetricDelete
storageMetricDeleteVersions
storageMetricVerifyFile
storageMetricWriteAll
storageMetricDeleteVersion
storageMetricWriteMetadata
storageMetricUpdateMetadata
storageMetricReadVersion
storageMetricReadXL
storageMetricReadAll
storageMetricStatInfoFile
storageMetricReadMultiple
storageMetricDeleteAbandonedParts
storageMetricDiskInfo
// .... add more
storageMetricLast
)
// Detects change in underlying disk.
type xlStorageDiskIDCheck struct {
// apiCalls should be placed first so alignment is guaranteed for atomic operations.
apiCalls [storageMetricLast]uint64
apiLatencies [storageMetricLast]*lockedLastMinuteLatency
diskID string
storage *xlStorage
health *diskHealthTracker
metricsCache timedValue
}
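// getMetrics returns a cached snapshot of per-API call counts and last-minute latencies.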
func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics {
p.metricsCache.Once.Do(func() {
p.metricsCache.TTL = 100 * time.Millisecond
p.metricsCache.Update = func() (interface{}, error) {
diskMetric := DiskMetrics{
LastMinute: make(map[string]AccElem, len(p.apiLatencies)),
APICalls: make(map[string]uint64, len(p.apiCalls)),
}
for i, v := range p.apiLatencies {
diskMetric.LastMinute[storageMetric(i).String()] = v.total()
}
for i := range p.apiCalls {
diskMetric.APICalls[storageMetric(i).String()] = atomic.LoadUint64(&p.apiCalls[i])
}
return diskMetric, nil
}
})
m, _ := p.metricsCache.Get()
return m.(DiskMetrics)
}
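// lockedLastMinuteLatency is a mutex-protected lastMinuteLatency.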
type lockedLastMinuteLatency struct {
sync.Mutex
lastMinuteLatency
}
func (e *lockedLastMinuteLatency) add(value time.Duration) {
e.Lock()
defer e.Unlock()
e.lastMinuteLatency.add(value)
}
// addSize will add a duration and size.
func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) {
e.Lock()
defer e.Unlock()
e.lastMinuteLatency.addSize(value, sz)
}
// total returns the total call count and latency for the last minute.
func (e *lockedLastMinuteLatency) total() AccElem {
e.Lock()
defer e.Unlock()
return e.lastMinuteLatency.getTotal()
}
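// newXLStorageDiskIDCheck wraps the given storage with disk-ID validation,
// health tracking and per-API latency metrics.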
func newXLStorageDiskIDCheck(storage *xlStorage) *xlStorageDiskIDCheck {
xl := xlStorageDiskIDCheck{
storage: storage,
health: newDiskHealthTracker(),
}
for i := range xl.apiLatencies[:] {
xl.apiLatencies[i] = &lockedLastMinuteLatency{}
}
return &xl
}
func (p *xlStorageDiskIDCheck) String() string {
return p.storage.String()
}
func (p *xlStorageDiskIDCheck) IsOnline() bool {
storedDiskID, err := p.storage.GetDiskID()
if err != nil {
return false
}
return storedDiskID == p.diskID
}
func (p *xlStorageDiskIDCheck) LastConn() time.Time {
return p.storage.LastConn()
}
func (p *xlStorageDiskIDCheck) IsLocal() bool {
return p.storage.IsLocal()
}
func (p *xlStorageDiskIDCheck) Endpoint() Endpoint {
return p.storage.Endpoint()
}
func (p *xlStorageDiskIDCheck) Hostname() string {
return p.storage.Hostname()
}
func (p *xlStorageDiskIDCheck) Healing() *healingTracker {
return p.storage.Healing()
}
func (p *xlStorageDiskIDCheck) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode) (dataUsageCache, error) {
if contextCanceled(ctx) {
close(updates)
return dataUsageCache{}, ctx.Err()
}
if err := p.checkDiskStale(); err != nil {
close(updates)
return dataUsageCache{}, err
}
return p.storage.NSScanner(ctx, cache, updates, scanMode)
}
func (p *xlStorageDiskIDCheck) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
return p.storage.GetDiskLoc()
}
func (p *xlStorageDiskIDCheck) SetDiskLoc(poolIdx, setIdx, diskIdx int) {
p.storage.SetDiskLoc(poolIdx, setIdx, diskIdx)
}
func (p *xlStorageDiskIDCheck) Close() error {
return p.storage.Close()
}
func (p *xlStorageDiskIDCheck) GetDiskID() (string, error) {
return p.storage.GetDiskID()
}
func (p *xlStorageDiskIDCheck) SetDiskID(id string) {
p.diskID = id
}
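// checkDiskStale returns errDiskNotFound when the disk ID read from the backend
// no longer matches the cached one.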
func (p *xlStorageDiskIDCheck) checkDiskStale() error {
if p.diskID == "" {
// For empty disk-id we allow the call as the server might be
// coming up and trying to read format.json or create format.json
return nil
}
storedDiskID, err := p.storage.GetDiskID()
if err != nil {
// return any error generated while reading `format.json`
return err
}
	if p.diskID == storedDiskID {
		return nil
	}
// not the same disk we remember, take it offline.
return errDiskNotFound
}
func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context) (info DiskInfo, err error) {
if contextCanceled(ctx) {
return DiskInfo{}, ctx.Err()
}
si := p.updateStorageMetrics(storageMetricDiskInfo)
defer si(&err)
info, err = p.storage.DiskInfo(ctx)
if err != nil {
return info, err
}
info.Metrics = p.getMetrics()
// check cached diskID against backend
// only if its non-empty.
if p.diskID != "" {
if p.diskID != info.ID {
return info, errDiskNotFound
}
}
if p.health.isFaulty() {
// if disk is already faulty return faulty for 'mc admin info' output and prometheus alerts.
return info, errFaultyDisk
}
return info, nil
}
func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...string) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricMakeVolBulk, volumes...)
if err != nil {
return err
}
defer done(&err)
return p.storage.MakeVolBulk(ctx, volumes...)
}
func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricMakeVol, volume)
if err != nil {
return err
}
defer done(&err)
if contextCanceled(ctx) {
return ctx.Err()
}
if err = p.checkDiskStale(); err != nil {
return err
}
return p.storage.MakeVol(ctx, volume)
}
func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) (vi []VolInfo, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListVols, "/")
if err != nil {
return nil, err
}
defer done(&err)
return p.storage.ListVols(ctx)
}
func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricStatVol, volume)
if err != nil {
return vol, err
}
defer done(&err)
return p.storage.StatVol(ctx, volume)
}
func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVol, volume)
if err != nil {
return err
}
defer done(&err)
return p.storage.DeleteVol(ctx, volume, forceDelete)
}
func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath string, count int) (s []string, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListDir, volume, dirPath)
if err != nil {
return nil, err
}
defer done(&err)
return p.storage.ListDir(ctx, volume, dirPath, count)
}
func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadFile, volume, path)
if err != nil {
return 0, err
}
defer done(&err)
return p.storage.ReadFile(ctx, volume, path, offset, buf, verifier)
}
func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricAppendFile, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.AppendFile(ctx, volume, path, buf)
}
func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCreateFile, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.CreateFile(ctx, volume, path, size, reader)
}
func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadFileStream, volume, path)
if err != nil {
return nil, err
}
defer done(&err)
return p.storage.ReadFileStream(ctx, volume, path, offset, length)
}
func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameFile, srcVolume, srcPath, dstVolume, dstPath)
if err != nil {
return err
}
defer done(&err)
return p.storage.RenameFile(ctx, srcVolume, srcPath, dstVolume, dstPath)
}
func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (sign uint64, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameData, srcPath, fi.DataDir, dstVolume, dstPath)
if err != nil {
return 0, err
}
defer done(&err)
return p.storage.RenameData(ctx, srcVolume, srcPath, fi, dstVolume, dstPath)
}
func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCheckParts, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.CheckParts(ctx, volume, path, fi)
}
func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDelete, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.Delete(ctx, volume, path, deleteOpts)
}
// DeleteVersions deletes slice of versions, it can be same object
// or multiple objects.
func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions) (errs []error) {
// Merely for tracing storage
path := ""
if len(versions) > 0 {
path = versions[0].Name
}
errs = make([]error, len(versions))
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVersions, volume, path)
if err != nil {
for i := range errs {
errs[i] = ctx.Err()
}
return errs
}
defer done(&err)
errs = p.storage.DeleteVersions(ctx, volume, versions)
for i := range errs {
if errs[i] != nil {
err = errs[i]
break
}
}
return errs
}
func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricVerifyFile, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.VerifyFile(ctx, volume, path, fi)
}
func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricWriteAll, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.WriteAll(ctx, volume, path, b)
}
func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVersion, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.DeleteVersion(ctx, volume, path, fi, forceDelMarker)
}
func (p *xlStorageDiskIDCheck) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricUpdateMetadata, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.UpdateMetadata(ctx, volume, path, fi)
}
func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricWriteMetadata, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.WriteMetadata(ctx, volume, path, fi)
}
func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadVersion, volume, path)
if err != nil {
return fi, err
}
defer done(&err)
return p.storage.ReadVersion(ctx, volume, path, versionID, readData)
}
func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadAll, volume, path)
if err != nil {
return nil, err
}
defer done(&err)
return p.storage.ReadAll(ctx, volume, path)
}
func (p *xlStorageDiskIDCheck) ReadXL(ctx context.Context, volume string, path string, readData bool) (rf RawFileInfo, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadXL, volume, path)
if err != nil {
return RawFileInfo{}, err
}
defer done(&err)
return p.storage.ReadXL(ctx, volume, path, readData)
}
func (p *xlStorageDiskIDCheck) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricStatInfoFile, volume, path)
if err != nil {
return nil, err
}
defer done(&err)
return p.storage.StatInfoFile(ctx, volume, path, glob)
}
// ReadMultiple will read multiple files and send each back as response.
// Files are read and returned in the given order.
// The resp channel is closed before the call returns.
// Only a canceled context will return an error.
func (p *xlStorageDiskIDCheck) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadMultiple, req.Bucket, req.Prefix)
if err != nil {
close(resp)
return err
}
defer done(&err)
return p.storage.ReadMultiple(ctx, req, resp)
}
// CleanAbandonedData will read metadata of the object on disk
// and delete any data directories and inline data that isn't referenced in metadata.
func (p *xlStorageDiskIDCheck) CleanAbandonedData(ctx context.Context, volume string, path string) error {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteAbandonedParts, volume, path)
if err != nil {
return err
}
defer done(&err)
return p.storage.CleanAbandonedData(ctx, volume, path)
}
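// storageTrace builds a trace event for a single storage API call.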
func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string, err string) madmin.TraceInfo {
return madmin.TraceInfo{
TraceType: madmin.TraceStorage,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: "storage." + s.String(),
Duration: duration,
Path: path,
Error: err,
}
}
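// scannerTrace builds a trace event for a single scanner operation.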
func scannerTrace(s scannerMetric, startTime time.Time, duration time.Duration, path string, custom map[string]string) madmin.TraceInfo {
return madmin.TraceInfo{
TraceType: madmin.TraceScanner,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: "scanner." + s.String(),
Duration: duration,
Path: path,
Custom: custom,
}
}
// Update storage metrics
func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...string) func(err *error) {
startTime := time.Now()
trace := globalTrace.NumSubscribers(madmin.TraceStorage) > 0
return func(errp *error) {
duration := time.Since(startTime)
atomic.AddUint64(&p.apiCalls[s], 1)
p.apiLatencies[s].add(duration)
if trace {
var errStr string
if errp != nil && *errp != nil {
errStr = (*errp).Error()
}
paths = append([]string{p.String()}, paths...)
globalTrace.Publish(storageTrace(s, startTime, duration, strings.Join(paths, " "), errStr))
}
}
}
const (
diskHealthOK = iota
diskHealthFaulty
)
// diskMaxConcurrent is the maximum number of concurrent operations allowed
// per disk, covering both local and (incoming) remote disk ops.
var diskMaxConcurrent = 512
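// The default can be overridden via the _MINIO_DISK_MAX_CONCURRENT environment variable.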
func init() {
s := env.Get("_MINIO_DISK_MAX_CONCURRENT", "512")
diskMaxConcurrent, _ = strconv.Atoi(s)
if diskMaxConcurrent <= 0 {
logger.Info("invalid _MINIO_DISK_MAX_CONCURRENT value: %s, defaulting to '512'", s)
diskMaxConcurrent = 512
}
}
type diskHealthTracker struct {
// atomic time of last success
lastSuccess int64
// atomic time of last time a token was grabbed.
lastStarted int64
// Atomic status of disk.
status int32
// Atomic number of requests blocking for a token.
blocked int32
// Concurrency tokens.
tokens chan struct{}
}
// newDiskHealthTracker creates a new disk health tracker.
func newDiskHealthTracker() *diskHealthTracker {
d := diskHealthTracker{
lastSuccess: time.Now().UnixNano(),
lastStarted: time.Now().UnixNano(),
status: diskHealthOK,
tokens: make(chan struct{}, diskMaxConcurrent),
}
for i := 0; i < diskMaxConcurrent; i++ {
d.tokens <- struct{}{}
}
return &d
}
// logSuccess will update the last successful operation time.
func (d *diskHealthTracker) logSuccess() {
atomic.StoreInt64(&d.lastSuccess, time.Now().UnixNano())
}
func (d *diskHealthTracker) isFaulty() bool {
return atomic.LoadInt32(&d.status) == diskHealthFaulty
}
type (
healthDiskCtxKey struct{}
healthDiskCtxValue struct {
lastSuccess *int64
}
)
// logSuccess will update the last successful operation time.
func (h *healthDiskCtxValue) logSuccess() {
atomic.StoreInt64(h.lastSuccess, time.Now().UnixNano())
}
// noopDoneFunc is a no-op done func.
// Can be reused.
var noopDoneFunc = func(_ *error) {}
// TrackDiskHealth for this request.
// When a nil error is returned, 'done' MUST be called
// with the status of the response, if it corresponds to disk health.
// If the pointer sent to done is non-nil AND the error
// is either nil or io.EOF the disk is considered good.
// So if unsure if the disk status is ok, return nil as a parameter to done.
// Shadowing will work as long as return error is named: https://go.dev/play/p/sauq86SsTN2
func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMetric, paths ...string) (c context.Context, done func(*error), err error) {
done = noopDoneFunc
if contextCanceled(ctx) {
return ctx, done, ctx.Err()
}
// Return early if disk is faulty already.
if atomic.LoadInt32(&p.health.status) == diskHealthFaulty {
return ctx, done, errFaultyDisk
}
// Verify if the disk is not stale
// - missing format.json (unformatted drive)
// - format.json is valid but invalid 'uuid'
if err = p.checkDiskStale(); err != nil {
return ctx, done, err
}
// Disallow recursive tracking to avoid deadlocks.
if ctx.Value(healthDiskCtxKey{}) != nil {
done = p.updateStorageMetrics(s, paths...)
return ctx, done, nil
}
select {
case <-ctx.Done():
return ctx, done, ctx.Err()
case <-p.health.tokens:
// Fast path, got token.
default:
// We ran out of tokens, check health before blocking.
err = p.waitForToken(ctx)
if err != nil {
return ctx, done, err
}
}
// We only progress here if we got a token.
atomic.StoreInt64(&p.health.lastStarted, time.Now().UnixNano())
ctx = context.WithValue(ctx, healthDiskCtxKey{}, &healthDiskCtxValue{lastSuccess: &p.health.lastSuccess})
si := p.updateStorageMetrics(s, paths...)
var once sync.Once
return ctx, func(errp *error) {
once.Do(func() {
p.health.tokens <- struct{}{}
if errp != nil {
err := *errp
if err != nil && !errors.Is(err, io.EOF) {
return
}
p.health.logSuccess()
}
si(errp)
})
}, nil
}
// waitForToken will wait for a token, while periodically
// checking the disk status.
// If nil is returned a token was picked up.
func (p *xlStorageDiskIDCheck) waitForToken(ctx context.Context) (err error) {
atomic.AddInt32(&p.health.blocked, 1)
defer func() {
atomic.AddInt32(&p.health.blocked, -1)
}()
// Avoid stampeding herd...
ticker := time.NewTicker(5*time.Second + time.Duration(rand.Int63n(int64(5*time.Second))))
defer ticker.Stop()
for {
err = p.checkHealth(ctx)
if err != nil {
return err
}
select {
case <-ticker.C:
// Ticker expired, check health again.
case <-ctx.Done():
return ctx.Err()
case <-p.health.tokens:
return nil
}
}
}
// checkHealth should only be called when tokens have run out.
// This will check if disk should be taken offline.
func (p *xlStorageDiskIDCheck) checkHealth(ctx context.Context) (err error) {
if atomic.LoadInt32(&p.health.status) == diskHealthFaulty {
return errFaultyDisk
}
// Check if there are tokens.
if len(p.health.tokens) > 0 {
return nil
}
const maxTimeSinceLastSuccess = 30 * time.Second
const minTimeSinceLastOpStarted = 15 * time.Second
// To avoid stampeding herd (100s of simultaneous starting requests)
// there must be a delay between the last started request and now
// for the last lastSuccess to be useful.
t := time.Since(time.Unix(0, atomic.LoadInt64(&p.health.lastStarted)))
if t < minTimeSinceLastOpStarted {
return nil
}
// If also more than 15 seconds since last success, take disk offline.
t = time.Since(time.Unix(0, atomic.LoadInt64(&p.health.lastSuccess)))
if t > maxTimeSinceLastSuccess {
if atomic.CompareAndSwapInt32(&p.health.status, diskHealthOK, diskHealthFaulty) {
logger.LogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline, time since last response %v", globalLocalNodeName, p.storage.String(), t.Round(time.Millisecond)))
go p.monitorDiskStatus()
}
return errFaultyDisk
}
return nil
}
// monitorDiskStatus should be called once when a drive has been marked offline.
// Once the disk has been deemed ok, it will return to online status.
func (p *xlStorageDiskIDCheck) monitorDiskStatus() {
t := time.NewTicker(5 * time.Second)
defer t.Stop()
fn := mustGetUUID()
for range t.C {
if len(p.health.tokens) == 0 {
// Queue is still full, no need to check.
continue
}
err := p.storage.WriteAll(context.Background(), minioMetaTmpBucket, fn, []byte{10000: 42})
if err != nil {
continue
}
b, err := p.storage.ReadAll(context.Background(), minioMetaTmpBucket, fn)
if err != nil || len(b) != 10001 {
continue
}
err = p.storage.Delete(context.Background(), minioMetaTmpBucket, fn, DeleteOptions{
Recursive: false,
Force: false,
})
if err == nil {
logger.Info("node(%s): Read/Write/Delete successful, bringing drive %s online. Drive was offline for %s.", globalLocalNodeName, p.storage.String(),
time.Since(time.Unix(0, atomic.LoadInt64(&p.health.lastSuccess))))
atomic.StoreInt32(&p.health.status, diskHealthOK)
return
}
}
}
// diskHealthCheckOK will check if the provided error is nil
// and update the disk status if so.
// For convenience, a bool is returned indicating whether the error
// counts as a success (nil or io.EOF) rather than a real error state.
func diskHealthCheckOK(ctx context.Context, err error) bool {
// Check if context has a disk health check.
tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
if !ok {
// No tracker, return
return err == nil || errors.Is(err, io.EOF)
}
if err == nil || errors.Is(err, io.EOF) {
tracker.logSuccess()
return true
}
return false
}
// diskHealthWrapper provides either an io.Reader or an io.Writer
// that updates the status of the provided tracker.
// Use through diskHealthReader or diskHealthWriter.
type diskHealthWrapper struct {
tracker *healthDiskCtxValue
r io.Reader
w io.Writer
}
func (d *diskHealthWrapper) Read(p []byte) (int, error) {
if d.r == nil {
return 0, fmt.Errorf("diskHealthWrapper: Read with no reader")
}
n, err := d.r.Read(p)
if err == nil || err == io.EOF && n > 0 {
d.tracker.logSuccess()
}
return n, err
}
func (d *diskHealthWrapper) Write(p []byte) (int, error) {
if d.w == nil {
return 0, fmt.Errorf("diskHealthWrapper: Write with no writer")
}
n, err := d.w.Write(p)
if err == nil && n == len(p) {
d.tracker.logSuccess()
}
return n, err
}
// diskHealthReader provides a wrapper that will update disk health on
// ctx, on every successful read.
// This should only be used directly at the os/syscall level,
// otherwise buffered operations may return false health checks.
func diskHealthReader(ctx context.Context, r io.Reader) io.Reader {
// Check if context has a disk health check.
tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
if !ok {
// No need to wrap
return r
}
return &diskHealthWrapper{r: r, tracker: tracker}
}
// diskHealthWriter provides a wrapper that will update disk health on
// ctx, on every successful write.
// This should only be used directly at the os/syscall level,
// otherwise buffered operations may return false health checks.
func diskHealthWriter(ctx context.Context, w io.Writer) io.Writer {
// Check if context has a disk health check.
tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
if !ok {
// No need to wrap
return w
}
return &diskHealthWrapper{w: w, tracker: tracker}
}
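// Example (illustrative sketch, not part of the original source): how a caller
// inside this package might wrap a low-level reader so that every successful
// read refreshes the drive health tracker carried on ctx. The helper name
// readWithHealth is hypothetical.
func readWithHealth(ctx context.Context, r io.Reader, buf []byte) (int, error) {
// diskHealthReader is a no-op wrapper when ctx carries no tracker.
hr := diskHealthReader(ctx, r)
return hr.Read(buf)
}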
<file_sep>#!/usr/bin/env bash
# shellcheck disable=SC2120
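# Illustrative summary (not part of the original script): this test brings up
# three MinIO sites backed by the same LDAP IDP, joins them in a
# site-replication group, and verifies that policies, service accounts,
# buckets, objects, tags and object-lock settings replicate across sites,
# including while one site is down and after it comes back online.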
exit_1() {
cleanup
echo "minio1 ============"
cat /tmp/minio1_1.log
echo "minio2 ============"
cat /tmp/minio2_1.log
echo "minio3 ============"
cat /tmp/minio3_1.log
exit 1
}
cleanup() {
echo "Cleaning up instances of MinIO"
pkill minio
pkill -9 minio
rm -rf /tmp/minio-ldap-idp{1,2,3}
}
cleanup
unset MINIO_KMS_KES_CERT_FILE
unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
export MINIO_CI_CD=1
export MINIO_BROWSER=off
export MINIO_ROOT_USER="minio"
export MINIO_ROOT_PASSWORD="<PASSWORD>"
export MINIO_KMS_AUTO_ENCRYPTION=off
export MINIO_PROMETHEUS_AUTH_TYPE=public
export MINIO_KMS_SECRET_KEY=my-minio-key:<KEY>
export MINIO_IDENTITY_LDAP_SERVER_ADDR="localhost:389"
export MINIO_IDENTITY_LDAP_SERVER_INSECURE="on"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN="cn=admin,dc=min,dc=io"
export MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD="admin"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN="dc=min,dc=io"
export MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER="(uid=%s)"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN="ou=swengg,dc=min,dc=io"
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER="(&(objectclass=groupOfNames)(member=%d))"
if [ ! -f ./mc ]; then
wget -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
minio server --config-dir /tmp/minio-ldap --address ":9001" /tmp/minio-ldap-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
site1_pid=$!
minio server --config-dir /tmp/minio-ldap --address ":9002" /tmp/minio-ldap-idp2/{1...4} >/tmp/minio2_1.log 2>&1 &
site2_pid=$!
minio server --config-dir /tmp/minio-ldap --address ":9003" /tmp/minio-ldap-idp3/{1...4} >/tmp/minio3_1.log 2>&1 &
site3_pid=$!
sleep 10
export MC_HOST_minio1=http://minio:minio123@localhost:9001
export MC_HOST_minio2=http://minio:minio123@localhost:9002
export MC_HOST_minio3=http://minio:minio123@localhost:9003
./mc admin replicate add minio1 minio2 minio3
./mc idp ldap policy attach minio1 consoleAdmin --user="uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
sleep 5
./mc admin user info minio2 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
./mc admin user info minio3 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
./mc admin policy create minio1 rw ./docs/site-replication/rw.json
sleep 5
./mc admin policy info minio2 rw >/dev/null 2>&1
./mc admin policy info minio3 rw >/dev/null 2>&1
./mc admin policy remove minio3 rw
sleep 10
./mc admin policy info minio1 rw
if [ $? -eq 0 ]; then
echo "expecting the command to fail, exiting.."
exit_1
fi
./mc admin policy info minio2 rw
if [ $? -eq 0 ]; then
echo "expecting the command to fail, exiting.."
exit_1
fi
./mc admin user info minio1 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
if [ $? -ne 0 ]; then
echo "policy mapping missing, exiting.."
exit_1
fi
./mc admin user info minio2 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
if [ $? -ne 0 ]; then
echo "policy mapping missing, exiting.."
exit_1
fi
./mc admin user info minio3 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
if [ $? -ne 0 ]; then
echo "policy mapping missing, exiting.."
exit_1
fi
# LDAP simple user
./mc admin user svcacct add minio2 dillon --access-key testsvc --secret-key testsvc123
if [ $? -ne 0 ]; then
echo "adding svc account failed, exiting.."
exit_1
fi
sleep 10
./mc admin user svcacct info minio1 testsvc
if [ $? -ne 0 ]; then
echo "svc account not mirrored, exiting.."
exit_1
fi
./mc admin user svcacct info minio2 testsvc
if [ $? -ne 0 ]; then
echo "svc account not mirrored, exiting.."
exit_1
fi
./mc admin user svcacct rm minio1 testsvc
if [ $? -ne 0 ]; then
echo "removing svc account failed, exiting.."
exit_1
fi
sleep 10
./mc admin user svcacct info minio2 testsvc
if [ $? -eq 0 ]; then
echo "svc account found after delete, exiting.."
exit_1
fi
./mc admin user svcacct info minio3 testsvc
if [ $? -eq 0 ]; then
echo "svc account found after delete, exiting.."
exit_1
fi
./mc mb minio1/newbucket
# copy large upload to newbucket on minio1
truncate -s 17M lrgfile
expected_checksum=$(cat ./lrgfile | md5sum)
./mc cp ./lrgfile minio1/newbucket
# create a bucket bucket2 on minio1.
./mc mb minio1/bucket2
sleep 5
./mc stat minio2/newbucket
if [ $? -ne 0 ]; then
echo "expecting bucket to be present. exiting.."
exit_1
fi
./mc stat minio3/newbucket
if [ $? -ne 0 ]; then
echo "expecting bucket to be present. exiting.."
exit_1
fi
./mc cp README.md minio2/newbucket/
sleep 5
./mc stat minio1/newbucket/README.md
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
./mc stat minio3/newbucket/README.md
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
sleep 10
./mc stat minio3/newbucket/lrgfile
if [ $? -ne 0 ]; then
echo "expected object to be present, exiting.."
exit_1
fi
actual_checksum=$(./mc cat minio3/newbucket/lrgfile | md5sum)
if [ "${expected_checksum}" != "${actual_checksum}" ]; then
echo "replication failed on multipart objects expected ${expected_checksum} got ${actual_checksum}"
exit_1
fi
rm ./lrgfile
vID=$(./mc stat minio2/newbucket/README.md --json | jq .versionID)
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
./mc tag set --version-id "${vID}" minio2/newbucket/README.md "k=v"
if [ $? -ne 0 ]; then
echo "expecting tag set to be successful. exiting.."
exit_1
fi
sleep 5
./mc tag remove --version-id "${vID}" minio2/newbucket/README.md
if [ $? -ne 0 ]; then
echo "expecting tag removal to be successful. exiting.."
exit_1
fi
sleep 5
replStatus_minio2=$(./mc stat minio2/newbucket/README.md --json | jq -r .replicationStatus)
if [ $? -ne 0 ]; then
echo "expecting object to be present. exiting.."
exit_1
fi
if [ "${replStatus_minio2}" != "COMPLETED" ]; then
echo "expected tag removal to have replicated, exiting..."
exit_1
fi
./mc rm minio3/newbucket/README.md
sleep 5
./mc stat minio2/newbucket/README.md
if [ $? -eq 0 ]; then
echo "expected file to be deleted, exiting.."
exit_1
fi
./mc stat minio1/newbucket/README.md
if [ $? -eq 0 ]; then
echo "expected file to be deleted, exiting.."
exit_1
fi
./mc mb --with-lock minio3/newbucket-olock
sleep 5
enabled_minio2=$(./mc stat --json minio2/newbucket-olock | jq -r .ObjectLock.enabled)
if [ $? -ne 0 ]; then
echo "expected bucket to be mirrored with object-lock but not present, exiting..."
exit_1
fi
if [ "${enabled_minio2}" != "Enabled" ]; then
echo "expected bucket to be mirrored with object-lock enabled, exiting..."
exit_1
fi
enabled_minio1=$(./mc stat --json minio1/newbucket-olock | jq -r .ObjectLock.enabled)
if [ $? -ne 0 ]; then
echo "expected bucket to be mirrored with object-lock but not present, exiting..."
exit_1
fi
if [ "${enabled_minio1}" != "Enabled" ]; then
echo "expected bucket to be mirrored with object-lock enabled, exiting..."
exit_1
fi
# "Test if most recent tag update is replicated"
./mc tag set minio2/newbucket "key=val1"
if [ $? -ne 0 ]; then
echo "expecting tag set to be successful. exiting.."
exit_1
fi
sleep 10
val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
if [ "${val}" != "val1" ]; then
echo "expected bucket tag to have replicated, exiting..."
exit_1
fi
# stop minio1
kill -9 ${site1_pid}
# Update tag on minio2/newbucket when minio1 is down
./mc tag set minio2/newbucket "key=val2"
# create a new bucket on minio2. This should replicate to minio1 after it comes online.
./mc mb minio2/newbucket2
# delete bucket2 on minio2. This should replicate to minio1 after it comes online.
./mc rb minio2/bucket2
# Restart minio1 instance
minio server --config-dir /tmp/minio-ldap --address ":9001" /tmp/minio-ldap-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
sleep 200
# Test whether most recent tag update on minio2 is replicated to minio1
val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
if [ "${val}" != "val2" ]; then
echo "expected bucket tag to have replicated, exiting..."
exit_1
fi
# Test if bucket created/deleted when minio1 is down healed
diff -q <(./mc ls minio1) <(./mc ls minio2) 1>/dev/null
if [ $? -ne 0 ]; then
echo "expected 'bucket2' delete and 'newbucket2' creation to have replicated, exiting..."
exit_1
fi
cleanup
<file_sep>//go:build ignore
// +build ignore
//
// MinIO Object Storage (c) 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"time"
"github.com/minio/madmin-go/v3"
)
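// Usage (illustrative note, not part of the original source): the program
// expects three command line arguments - endpoint, access key and secret key.
// It starts a recursive heal on the bucket "healing-rewrite-bucket", rewriting
// old non-inlined xl.meta files, and streams per-item results as JSON until
// the heal sequence finishes or is stopped.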
func main() {
// Note: the endpoint, access key and secret key are read from the
// command line arguments (os.Args[1], os.Args[2], os.Args[3]).
// API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise.
// New returns a MinIO Admin client object.
madmClnt, err := madmin.New(os.Args[1], os.Args[2], os.Args[3], false)
if err != nil {
log.Fatalln(err)
}
opts := madmin.HealOpts{
Recursive: true, // recursively heal all objects at 'prefix'
Remove: true, // remove content that has lost quorum and not recoverable
Recreate: true, // rewrite all old non-inlined xl.meta to new xl.meta
ScanMode: madmin.HealNormalScan, // by default do not do 'deep' scanning
}
start, _, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, "", false, false)
if err != nil {
log.Fatalln(err)
}
fmt.Println("Healstart sequence ===")
enc := json.NewEncoder(os.Stdout)
if err = enc.Encode(&start); err != nil {
log.Fatalln(err)
}
fmt.Println()
for {
_, status, err := madmClnt.Heal(context.Background(), "healing-rewrite-bucket", "", opts, start.ClientToken, false, false)
if err != nil {
log.Fatalln(err)
}
if status.Summary == "finished" {
fmt.Println("Healstatus on items ===")
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
break
}
if status.Summary == "stopped" {
fmt.Println("Healstatus on items ===")
fmt.Println("Heal failed with", status.FailureDetail)
break
}
for _, item := range status.Items {
if err = enc.Encode(&item); err != nil {
log.Fatalln(err)
}
}
time.Sleep(time.Second)
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"crypto/subtle"
"encoding/json"
"net/http"
"time"
"github.com/minio/kes-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
iampolicy "github.com/minio/pkg/iam/policy"
)
// KMSStatusHandler - GET /minio/kms/v1/status
func (a kmsAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSStatusAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}
resp, err := json.Marshal(status)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, resp)
}
// KMSMetricsHandler - POST /minio/kms/v1/metrics
func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSMetrics")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSMetricsAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
if _, ok := GlobalKMS.(kms.KeyManager); !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
metrics, err := GlobalKMS.Metrics(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if res, err := json.Marshal(metrics); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, res)
}
}
// KMSAPIsHandler - POST /minio/kms/v1/apis
func (a kmsAPIHandlers) KMSAPIsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSAPIs")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSAPIAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.StatusManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
apis, err := manager.APIs(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if res, err := json.Marshal(apis); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, res)
}
}
type versionResponse struct {
Version string `json:"version"`
}
// KMSVersionHandler - POST /minio/kms/v1/version
func (a kmsAPIHandlers) KMSVersionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSVersion")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSVersionAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.StatusManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
version, err := manager.Version(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
res := &versionResponse{Version: version}
v, err := json.Marshal(res)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, v)
}
// KMSCreateKeyHandler - POST /minio/kms/v1/key/create?key-id=<master-key-id>
func (a kmsAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
// If env variable MINIO_KMS_SECRET_KEY is populated, prevent creation of new keys
ctx := newContext(r, w, "KMSCreateKey")
if GlobalKMS != nil && GlobalKMS.IsLocal() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSDefaultKeyAlreadyConfigured), r.URL)
return
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSCreateKeyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.KeyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err := manager.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSDeleteKeyHandler - DELETE /minio/kms/v1/key/delete?key-id=<master-key-id>
func (a kmsAPIHandlers) KMSDeleteKeyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDeleteKey")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDeleteKeyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.KeyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err := manager.DeleteKey(ctx, r.Form.Get("key-id")); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSListKeysHandler - GET /minio/kms/v1/key/list?pattern=<pattern>
func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSListKeys")
if GlobalKMS != nil && GlobalKMS.IsLocal() {
res, err := json.Marshal(GlobalKMS.List())
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, res)
return
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSListKeysAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.KeyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
keys, err := manager.ListKeys(ctx, r.Form.Get("pattern"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
values, err := keys.Values(0)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if res, err := json.Marshal(values); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, res)
}
}
type importKeyRequest struct {
Bytes string
}
// KMSImportKeyHandler - POST /minio/kms/v1/key/import?key-id=<master-key-id>
func (a kmsAPIHandlers) KMSImportKeyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSImportKey")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSImportKeyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.KeyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
var request importKeyRequest
if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := manager.ImportKey(ctx, r.Form.Get("key-id"), []byte(request.Bytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSKeyStatusHandler - GET /minio/kms/v1/key/status?key-id=<master-key-id>
func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSKeyStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSKeyStatusAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,
}
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, resp)
return
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, resp)
return
}
// 3. Compare generated key with decrypted key
if subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1 {
response.DecryptionErr = "The generated and the decrypted data key do not match"
resp, err := json.Marshal(response)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, resp)
return
}
resp, err := json.Marshal(response)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, resp)
}
// KMSDescribePolicyHandler - GET /minio/kms/v1/policy/describe?policy=<policy>
func (a kmsAPIHandlers) KMSDescribePolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDescribePolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDescribePolicyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
policy, err := manager.DescribePolicy(ctx, r.Form.Get("policy"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
p, err := json.Marshal(policy)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, p)
}
type assignPolicyRequest struct {
Identity string
}
// KMSAssignPolicyHandler - POST /minio/kms/v1/policy/assign?policy=<policy>
func (a kmsAPIHandlers) KMSAssignPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSAssignPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSAssignPolicyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
var request assignPolicyRequest
if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
err := manager.AssignPolicy(ctx, r.Form.Get("policy"), request.Identity)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSSetPolicyHandler - POST /minio/kms/v1/policy/policy?policy=<policy>
func (a kmsAPIHandlers) KMSSetPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSSetPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSSetPolicyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
var policy kes.Policy
if err := json.NewDecoder(r.Body).Decode(&policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err := manager.SetPolicy(ctx, r.Form.Get("policy"), &policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSDeletePolicyHandler - DELETE /minio/kms/v1/policy/delete?policy=<policy>
func (a kmsAPIHandlers) KMSDeletePolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDeletePolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDeletePolicyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err := manager.DeletePolicy(ctx, r.Form.Get("policy")); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSListPoliciesHandler - GET /minio/kms/v1/policy/list?pattern=<pattern>
func (a kmsAPIHandlers) KMSListPoliciesHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSListPolicies")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSListPoliciesAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
policies, err := manager.ListPolicies(ctx, r.Form.Get("pattern"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
values, err := policies.Values(0)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if res, err := json.Marshal(values); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, res)
}
}
// KMSGetPolicyHandler - GET /minio/kms/v1/policy/get?policy=<policy>
func (a kmsAPIHandlers) KMSGetPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSGetPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSGetPolicyAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.PolicyManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
policy, err := manager.GetPolicy(ctx, r.Form.Get("policy"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if p, err := json.Marshal(policy); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, p)
}
}
// KMSDescribeIdentityHandler - GET /minio/kms/v1/identity/describe?identity=<identity>
func (a kmsAPIHandlers) KMSDescribeIdentityHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDescribeIdentity")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDescribeIdentityAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.IdentityManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
identity, err := manager.DescribeIdentity(ctx, r.Form.Get("identity"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
i, err := json.Marshal(identity)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, i)
}
type describeSelfIdentityResponse struct {
Policy *kes.Policy `json:"policy"`
PolicyName string `json:"policyName"`
Identity string `json:"identity"`
IsAdmin bool `json:"isAdmin"`
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
}
// KMSDescribeSelfIdentityHandler - GET /minio/kms/v1/identity/describe-self
func (a kmsAPIHandlers) KMSDescribeSelfIdentityHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDescribeSelfIdentity")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDescribeSelfIdentityAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.IdentityManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
identity, policy, err := manager.DescribeSelfIdentity(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
res := &describeSelfIdentityResponse{
Policy: policy,
PolicyName: identity.Policy,
Identity: identity.Identity.String(),
IsAdmin: identity.IsAdmin,
CreatedAt: identity.CreatedAt,
CreatedBy: identity.CreatedBy.String(),
}
i, err := json.Marshal(res)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, i)
}
// KMSDeleteIdentityHandler - DELETE /minio/kms/v1/identity/delete?identity=<identity>
func (a kmsAPIHandlers) KMSDeleteIdentityHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSDeleteIdentity")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSDeleteIdentityAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.IdentityManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if err := manager.DeleteIdentity(ctx, r.Form.Get("identity")); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseHeadersOnly(w)
}
// KMSListIdentitiesHandler - GET /minio/kms/v1/identity/list?pattern=<pattern>
func (a kmsAPIHandlers) KMSListIdentitiesHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSListIdentities")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.KMSListIdentitiesAction)
if objectAPI == nil {
return
}
if GlobalKMS == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
manager, ok := GlobalKMS.(kms.IdentityManager)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
identities, err := manager.ListIdentities(ctx, r.Form.Get("pattern"))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
values, err := identities.Values(0)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if res, err := json.Marshal(values); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
} else {
writeSuccessResponseJSON(w, res)
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"net/url"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/once"
"github.com/minio/minio/internal/store"
xnet "github.com/minio/pkg/net"
)
const (
// Timeout for the webhook http call
webhookCallTimeout = 5 * time.Second
// maxWorkers is the maximum number of concurrent operations.
maxWorkers = 16
// the suffix for the configured queue dir where the logs will be persisted.
httpLoggerExtension = ".http.log"
)
const (
statusOffline = iota
statusOnline
statusClosed
)
// Config http logger target
type Config struct {
Enabled bool `json:"enabled"`
Name string `json:"name"`
UserAgent string `json:"userAgent"`
Endpoint string `json:"endpoint"`
AuthToken string `json:"authToken"`
ClientCert string `json:"clientCert"`
ClientKey string `json:"clientKey"`
QueueSize int `json:"queueSize"`
QueueDir string `json:"queueDir"`
Proxy string `json:"proxy"`
Transport http.RoundTripper `json:"-"`
// Custom logger
LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"`
}
// Target implements logger.Target and sends the json
// format of a log entry to the configured http endpoint.
// An internal buffer of logs is maintained but when the
// buffer is full, new logs are just ignored and an error
// is returned to the caller.
type Target struct {
totalMessages int64
failedMessages int64
status int32
// Worker control
workers int64
workerStartMu sync.Mutex
lastStarted time.Time
wg sync.WaitGroup
// Channel of log entries.
// Reading logCh must hold read lock on logChMu (to avoid read race)
// Sending a value on logCh must hold read lock on logChMu (to avoid closing)
logCh chan interface{}
logChMu sync.RWMutex
// If the first init fails, this starts a goroutine that
// will attempt to establish the connection.
revive sync.Once
// store to persist and replay the logs to the target
// to avoid missing events when the target is down.
store store.Store[interface{}]
storeCtxCancel context.CancelFunc
initQueueStoreOnce once.Init
config Config
client *http.Client
}
// Name returns the name of the target
func (h *Target) Name() string {
return "minio-http-" + h.config.Name
}
// Endpoint returns the backend endpoint
func (h *Target) Endpoint() string {
return h.config.Endpoint
}
func (h *Target) String() string {
return h.config.Name
}
// IsOnline returns true if the target is reachable.
func (h *Target) IsOnline(ctx context.Context) bool {
if err := h.checkAlive(ctx); err != nil {
return !xnet.IsNetworkOrHostDown(err, false)
}
return true
}
// Stats returns the target statistics.
func (h *Target) Stats() types.TargetStats {
h.logChMu.RLock()
queueLength := len(h.logCh)
h.logChMu.RUnlock()
stats := types.TargetStats{
TotalMessages: atomic.LoadInt64(&h.totalMessages),
FailedMessages: atomic.LoadInt64(&h.failedMessages),
QueueLength: queueLength,
}
return stats
}
// This will check if we can reach the remote.
func (h *Target) checkAlive(ctx context.Context) (err error) {
return h.send(ctx, []byte(`{}`), webhookCallTimeout)
}
// Init validates and initializes the http target.
func (h *Target) Init(ctx context.Context) (err error) {
if h.config.QueueDir != "" {
return h.initQueueStoreOnce.DoWithContext(ctx, h.initQueueStore)
}
return h.initLogChannel(ctx)
}
func (h *Target) initQueueStore(ctx context.Context) (err error) {
var queueStore store.Store[interface{}]
queueDir := filepath.Join(h.config.QueueDir, h.Name())
queueStore = store.NewQueueStore[interface{}](queueDir, uint64(h.config.QueueSize), httpLoggerExtension)
if err = queueStore.Open(); err != nil {
return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err)
}
ctx, cancel := context.WithCancel(ctx)
h.store = queueStore
h.storeCtxCancel = cancel
store.StreamItems(h.store, h, ctx.Done(), h.config.LogOnce)
return
}
func (h *Target) initLogChannel(ctx context.Context) (err error) {
switch atomic.LoadInt32(&h.status) {
case statusOnline:
return nil
case statusClosed:
return errors.New("target is closed")
}
if !h.IsOnline(ctx) {
// Start a goroutine that will continue to check if we can reach
h.revive.Do(func() {
go func() {
t := time.NewTicker(time.Second)
defer t.Stop()
for range t.C {
if atomic.LoadInt32(&h.status) != statusOffline {
return
}
if h.IsOnline(ctx) {
// We are online.
if atomic.CompareAndSwapInt32(&h.status, statusOffline, statusOnline) {
h.workerStartMu.Lock()
h.lastStarted = time.Now()
h.workerStartMu.Unlock()
atomic.AddInt64(&h.workers, 1)
go h.startHTTPLogger(ctx)
}
return
}
}
}()
})
return err
}
if atomic.CompareAndSwapInt32(&h.status, statusOffline, statusOnline) {
h.workerStartMu.Lock()
h.lastStarted = time.Now()
h.workerStartMu.Unlock()
atomic.AddInt64(&h.workers, 1)
go h.startHTTPLogger(ctx)
}
return nil
}
func (h *Target) send(ctx context.Context, payload []byte, timeout time.Duration) (err error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
h.config.Endpoint, bytes.NewReader(payload))
if err != nil {
return fmt.Errorf("invalid configuration for '%s'; %v", h.config.Endpoint, err)
}
req.Header.Set(xhttp.ContentType, "application/json")
req.Header.Set(xhttp.MinIOVersion, xhttp.GlobalMinIOVersion)
req.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID)
// Set user-agent to indicate MinIO release
// version to the configured log endpoint
req.Header.Set("User-Agent", h.config.UserAgent)
if h.config.AuthToken != "" {
req.Header.Set("Authorization", h.config.AuthToken)
}
resp, err := h.client.Do(req)
if err != nil {
return fmt.Errorf("%s returned '%w', please check your endpoint configuration", h.config.Endpoint, err)
}
// Drain any response.
xhttp.DrainBody(resp.Body)
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
// accepted HTTP status codes.
return nil
case http.StatusForbidden:
return fmt.Errorf("%s returned '%s', please check if your auth token is correctly set", h.config.Endpoint, resp.Status)
default:
return fmt.Errorf("%s returned '%s', please check your endpoint configuration", h.config.Endpoint, resp.Status)
}
}
func (h *Target) logEntry(ctx context.Context, entry interface{}) {
logJSON, err := json.Marshal(&entry)
if err != nil {
atomic.AddInt64(&h.failedMessages, 1)
return
}
tries := 0
for {
if tries > 0 {
if tries >= 10 || atomic.LoadInt32(&h.status) == statusClosed {
// Don't retry when closing...
return
}
// sleep = (tries+2) ^ 2 milliseconds.
sleep := time.Duration(math.Pow(float64(tries+2), 2)) * time.Millisecond
if sleep > time.Second {
sleep = time.Second
}
time.Sleep(sleep)
}
tries++
if err := h.send(ctx, logJSON, webhookCallTimeout); err != nil {
h.config.LogOnce(ctx, err, h.config.Endpoint)
atomic.AddInt64(&h.failedMessages, 1)
} else {
return
}
}
}
func (h *Target) startHTTPLogger(ctx context.Context) {
h.logChMu.RLock()
logCh := h.logCh
if logCh != nil {
// We are not allowed to add when logCh is nil
h.wg.Add(1)
defer h.wg.Done()
}
h.logChMu.RUnlock()
defer atomic.AddInt64(&h.workers, -1)
if logCh == nil {
return
}
// Send messages until channel is closed.
for entry := range logCh {
atomic.AddInt64(&h.totalMessages, 1)
h.logEntry(ctx, entry)
}
}
// New initializes a new logger target which
// sends log over http to the specified endpoint
func New(config Config) *Target {
h := &Target{
logCh: make(chan interface{}, config.QueueSize),
config: config,
status: statusOffline,
}
// If proxy available, set the same
if h.config.Proxy != "" {
proxyURL, _ := url.Parse(h.config.Proxy)
transport := h.config.Transport
ctransport := transport.(*http.Transport).Clone()
ctransport.Proxy = http.ProxyURL(proxyURL)
h.config.Transport = ctransport
}
h.client = &http.Client{Transport: h.config.Transport}
return h
}
// SendFromStore - reads the log from store and sends it to webhook.
func (h *Target) SendFromStore(key string) (err error) {
var eventData interface{}
eventData, err = h.store.Get(key)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
atomic.AddInt64(&h.totalMessages, 1)
logJSON, err := json.Marshal(&eventData)
if err != nil {
atomic.AddInt64(&h.failedMessages, 1)
return
}
if err := h.send(context.Background(), logJSON, webhookCallTimeout); err != nil {
atomic.AddInt64(&h.failedMessages, 1)
if xnet.IsNetworkOrHostDown(err, true) {
return store.ErrNotConnected
}
return err
}
// Delete the event from store.
return h.store.Del(key)
}
// Send queues the log message 'entry' for delivery to the http target.
// If the remote is offline, messages are queued until the queue is full.
// If Cancel has been called, the message is ignored.
func (h *Target) Send(ctx context.Context, entry interface{}) error {
if h.store != nil {
// save the entry to the queue store which will be replayed to the target.
return h.store.Put(entry)
}
if atomic.LoadInt32(&h.status) == statusClosed {
return nil
}
h.logChMu.RLock()
defer h.logChMu.RUnlock()
if h.logCh == nil {
// We are closing...
return nil
}
select {
case h.logCh <- entry:
default:
// Drop messages until we are online.
if !h.IsOnline(ctx) {
atomic.AddInt64(&h.totalMessages, 1)
atomic.AddInt64(&h.failedMessages, 1)
return errors.New("log buffer full and remote offline")
}
nWorkers := atomic.LoadInt64(&h.workers)
if nWorkers < maxWorkers {
// Only have one try to start at the same time.
h.workerStartMu.Lock()
defer h.workerStartMu.Unlock()
// Start one max every second.
if time.Since(h.lastStarted) > time.Second {
if atomic.CompareAndSwapInt64(&h.workers, nWorkers, nWorkers+1) {
// Start another logger.
h.lastStarted = time.Now()
go h.startHTTPLogger(ctx)
}
}
h.logCh <- entry
return nil
}
// log channel is full, do not wait and return
// an error immediately to the caller
atomic.AddInt64(&h.totalMessages, 1)
atomic.AddInt64(&h.failedMessages, 1)
return errors.New("log buffer full, remote endpoint is not able to keep up")
}
return nil
}
// Cancel - cancels the target.
// All queued messages are flushed and the function returns afterwards.
// All messages sent to the target after this function has been called will be dropped.
func (h *Target) Cancel() {
atomic.StoreInt32(&h.status, statusClosed)
// If a queue store is configured, cancel its context to
// stop the replay goroutine.
if h.store != nil {
h.storeCtxCancel()
}
// Set logch to nil and close it.
// This will block all Send operations,
// and finish the existing ones.
// All future ones will be discarded.
h.logChMu.Lock()
close(h.logCh)
h.logCh = nil
h.logChMu.Unlock()
// Wait for messages to be sent...
h.wg.Wait()
}
// Type - returns type of the target
func (h *Target) Type() types.TargetType {
return types.TargetHTTP
}
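// Example (illustrative sketch, not part of the original source): constructing
// and using the HTTP logger target from within this package. The endpoint,
// queue size and log entry below are placeholder values.
func exampleTargetUsage(ctx context.Context) error {
t := New(Config{
Name:      "example-webhook",
Endpoint:  "http://localhost:8080/ingest",
QueueSize: 10000,
Transport: http.DefaultTransport,
LogOnce:   func(ctx context.Context, err error, id string, errKind ...interface{}) {},
})
if err := t.Init(ctx); err != nil {
return err
}
// Cancel flushes queued messages and stops the workers.
defer t.Cancel()
return t.Send(ctx, map[string]string{"message": "hello"})
}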
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kms
import (
"context"
"github.com/minio/kes-go"
)
// KeyManager is the generic interface that handles KMS key operations
type KeyManager interface {
// CreateKey creates a new key at the KMS with the given key ID.
CreateKey(ctx context.Context, keyID string) error
// DeleteKey deletes a key at the KMS with the given key ID.
// Please note that this is a dangerous operation.
// Once a key has been deleted, all data that has been encrypted with it cannot be decrypted
// anymore and is therefore lost.
DeleteKey(ctx context.Context, keyID string) error
// ListKeys lists all key names that match the specified pattern. In particular,
// the pattern * lists all keys.
ListKeys(ctx context.Context, pattern string) (*kes.KeyIterator, error)
// ImportKey imports a cryptographic key into the KMS.
ImportKey(ctx context.Context, keyID string, bytes []byte) error
// EncryptKey encrypts and authenticates a (small) plaintext with the cryptographic key.
// The plaintext must not exceed 1 MB.
EncryptKey(keyID string, plaintext []byte, context Context) ([]byte, error)
}
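// Example (illustrative sketch, not part of the original source): callers are
// expected to type-assert their KMS handle to KeyManager before attempting key
// management, mirroring how the admin handlers use this interface. The helper
// createKeyIfSupported is hypothetical.
func createKeyIfSupported(ctx context.Context, k interface{}, keyID string) (supported bool, err error) {
m, ok := k.(KeyManager)
if !ok {
// Backend does not implement key management.
return false, nil
}
return true, m.CreateKey(ctx, keyID)
}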
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kms
import (
"context"
"github.com/minio/kes-go"
)
// PolicyManager is the generic interface that handles KMS policy operations
type PolicyManager interface {
// DescribePolicy describes a policy by returning its metadata.
// e.g. who created the policy at which point in time.
DescribePolicy(ctx context.Context, policy string) (*kes.PolicyInfo, error)
// AssignPolicy assigns a policy to an identity.
// An identity can have at most one policy while the same policy can be assigned to multiple identities.
// The assigned policy defines which API calls this identity can perform.
// It's not possible to assign a policy to the admin identity.
// Further, an identity cannot assign a policy to itself.
AssignPolicy(ctx context.Context, policy, identity string) error
// SetPolicy creates or updates a policy.
SetPolicy(ctx context.Context, policy string, policyItem *kes.Policy) error
// GetPolicy gets a policy from KMS.
GetPolicy(ctx context.Context, policy string) (*kes.Policy, error)
// ListPolicies lists all policy metadata that match the specified pattern.
// In particular, the pattern * lists all policy metadata.
ListPolicies(ctx context.Context, pattern string) (*kes.PolicyIterator, error)
// DeletePolicy deletes a policy from KMS.
// All identities that have been assigned to this policy will lose all authorization privileges.
DeletePolicy(ctx context.Context, policy string) error
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package store
import (
"encoding/json"
"errors"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/google/uuid"
)
const (
defaultLimit = 100000 // Default store limit.
defaultExt = ".unknown"
)
// errLimitExceeded error is sent when the maximum limit is reached.
var errLimitExceeded = errors.New("the maximum store limit reached")
// QueueStore - Filestore for persisting items.
type QueueStore[_ any] struct {
sync.RWMutex
entryLimit uint64
directory string
fileExt string
entries map[string]int64 // key -> modtime as unix nano
}
// NewQueueStore - Creates an instance for QueueStore.
func NewQueueStore[I any](directory string, limit uint64, ext string) *QueueStore[I] {
if limit == 0 {
limit = defaultLimit
}
if ext == "" {
ext = defaultExt
}
return &QueueStore[I]{
directory: directory,
entryLimit: limit,
fileExt: ext,
entries: make(map[string]int64, limit),
}
}
// Open - Creates the directory if not present.
func (store *QueueStore[_]) Open() error {
store.Lock()
defer store.Unlock()
if err := os.MkdirAll(store.directory, os.FileMode(0o770)); err != nil {
return err
}
files, err := store.list()
if err != nil {
return err
}
// Truncate entries.
if uint64(len(files)) > store.entryLimit {
files = files[:store.entryLimit]
}
for _, file := range files {
if file.IsDir() {
continue
}
key := strings.TrimSuffix(file.Name(), store.fileExt)
if fi, err := file.Info(); err == nil {
store.entries[key] = fi.ModTime().UnixNano()
}
}
return nil
}
// write - writes an item to the directory.
func (store *QueueStore[I]) write(key string, item I) error {
// Marshals the item.
eventData, err := json.Marshal(item)
if err != nil {
return err
}
path := filepath.Join(store.directory, key+store.fileExt)
if err := os.WriteFile(path, eventData, os.FileMode(0o770)); err != nil {
return err
}
// Record the entry with its write time.
store.entries[key] = time.Now().UnixNano()
return nil
}
// Put - puts an item to the store.
func (store *QueueStore[I]) Put(item I) error {
store.Lock()
defer store.Unlock()
if uint64(len(store.entries)) >= store.entryLimit {
return errLimitExceeded
}
// Generate a new UUID for the key.
key, err := uuid.NewRandom()
if err != nil {
return err
}
return store.write(key.String(), item)
}
// Get - gets an item from the store.
func (store *QueueStore[I]) Get(key string) (item I, err error) {
store.RLock()
defer func(store *QueueStore[I]) {
store.RUnlock()
if err != nil {
// Upon error we remove the entry.
store.Del(key)
}
}(store)
var eventData []byte
eventData, err = os.ReadFile(filepath.Join(store.directory, key+store.fileExt))
if err != nil {
return item, err
}
if len(eventData) == 0 {
return item, os.ErrNotExist
}
if err = json.Unmarshal(eventData, &item); err != nil {
return item, err
}
return item, nil
}
// Del - Deletes an entry from the store.
func (store *QueueStore[_]) Del(key string) error {
store.Lock()
defer store.Unlock()
return store.del(key)
}
// Len returns the entry count.
func (store *QueueStore[_]) Len() int {
store.RLock()
l := len(store.entries)
defer store.RUnlock()
return l
}
// lockless call
func (store *QueueStore[_]) del(key string) error {
err := os.Remove(filepath.Join(store.directory, key+store.fileExt))
// Delete the entry no matter the result
delete(store.entries, key)
return err
}
// List - lists all files registered in the store.
func (store *QueueStore[_]) List() ([]string, error) {
store.RLock()
l := make([]string, 0, len(store.entries))
for k := range store.entries {
l = append(l, k)
}
// Sort entries...
sort.Slice(l, func(i, j int) bool {
return store.entries[l[i]] < store.entries[l[j]]
})
store.RUnlock()
return l, nil
}
// list will read all entries from disk.
// Entries are returned sorted by modtime, oldest first.
// Underlying entry list in store is *not* updated.
func (store *QueueStore[_]) list() ([]os.DirEntry, error) {
files, err := os.ReadDir(store.directory)
if err != nil {
return nil, err
}
// Sort the entries.
sort.Slice(files, func(i, j int) bool {
ii, err := files[i].Info()
if err != nil {
return false
}
ji, err := files[j].Info()
if err != nil {
return true
}
return ii.ModTime().Before(ji.ModTime())
})
return files, nil
}
// Extension will return the file extension used
// for the items stored in the queue.
func (store *QueueStore[_]) Extension() string {
return store.fileExt
}
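// Example (illustrative sketch, not part of the original source): a typical
// round trip through the queue store. The directory, limit and extension
// values are placeholders.
func exampleQueueStoreUsage() error {
qs := NewQueueStore[string]("/tmp/example-queue", 1000, ".example")
if err := qs.Open(); err != nil {
return err
}
if err := qs.Put("hello"); err != nil {
return err
}
keys, err := qs.List()
if err != nil {
return err
}
for _, key := range keys {
item, err := qs.Get(key) // Get removes the entry if reading it fails.
if err != nil {
return err
}
_ = item // process the item, then remove it from the store
if err := qs.Del(key); err != nil {
return err
}
}
return nil
}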
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package replication
//go:generate msgp -file=$GOFILE
// StatusType of Replication for x-amz-replication-status header
type StatusType string
const (
// Pending - replication is pending.
Pending StatusType = "PENDING"
// Completed - replication completed ok.
Completed StatusType = "COMPLETED"
// CompletedLegacy was called "COMPLETE" incorrectly.
CompletedLegacy StatusType = "COMPLETE"
// Failed - replication failed.
Failed StatusType = "FAILED"
// Replica - this is a replica.
Replica StatusType = "REPLICA"
)
// String returns string representation of status
func (s StatusType) String() string {
return string(s)
}
// Empty returns true if this status is not set
func (s StatusType) Empty() bool {
return string(s) == ""
}
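// Example (illustrative sketch, not part of the original source): callers that
// only care whether replication finished typically treat the legacy "COMPLETE"
// value the same as "COMPLETED". The helper name replicationFinished is
// hypothetical.
func replicationFinished(s StatusType) bool {
return s == Completed || s == CompletedLegacy
}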
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
"github.com/minio/pkg/env"
"github.com/minio/pkg/workers"
)
// PoolDecommissionInfo currently decommissioning information
type PoolDecommissionInfo struct {
StartTime time.Time `json:"startTime" msg:"st"`
StartSize int64 `json:"startSize" msg:"ss"`
TotalSize int64 `json:"totalSize" msg:"ts"`
CurrentSize int64 `json:"currentSize" msg:"cs"`
Complete bool `json:"complete" msg:"cmp"`
Failed bool `json:"failed" msg:"fl"`
Canceled bool `json:"canceled" msg:"cnl"`
// Internal information.
QueuedBuckets []string `json:"-" msg:"bkts"`
DecommissionedBuckets []string `json:"-" msg:"dbkts"`
// Last bucket/object decommissioned.
Bucket string `json:"-" msg:"bkt"`
// Captures prefix that is currently being
// decommissioned inside the 'Bucket'
Prefix string `json:"-" msg:"pfx"`
Object string `json:"-" msg:"obj"`
// Verbose information
ItemsDecommissioned int64 `json:"objectsDecommissioned" msg:"id"`
ItemsDecommissionFailed int64 `json:"objectsDecommissionedFailed" msg:"idf"`
BytesDone int64 `json:"bytesDecommissioned" msg:"bd"`
BytesFailed int64 `json:"bytesDecommissionedFailed" msg:"bf"`
}
// bucketPop should be called when a bucket is done decommissioning.
// Adds the bucket to the list of decommissioned buckets and updates resume numbers.
func (pd *PoolDecommissionInfo) bucketPop(bucket string) {
pd.DecommissionedBuckets = append(pd.DecommissionedBuckets, bucket)
for i, b := range pd.QueuedBuckets {
if b == bucket {
// Bucket is done.
pd.QueuedBuckets = append(pd.QueuedBuckets[:i], pd.QueuedBuckets[i+1:]...)
// Clear tracker info.
if pd.Bucket == bucket {
pd.Bucket = "" // empty this out for next bucket
pd.Prefix = "" // empty this out for the next bucket
pd.Object = "" // empty this out for next object
}
return
}
}
}
func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool {
for _, b := range pd.DecommissionedBuckets {
if b == bucket {
return true
}
}
return false
}
func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) {
for _, b := range pd.QueuedBuckets {
if pd.isBucketDecommissioned(b) {
return
}
if b == bucket.String() {
return
}
}
pd.QueuedBuckets = append(pd.QueuedBuckets, bucket.String())
pd.Bucket = bucket.Name
pd.Prefix = bucket.Prefix
}
// PoolStatus captures current pool status
type PoolStatus struct {
ID int `json:"id" msg:"id"`
CmdLine string `json:"cmdline" msg:"cl"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
Decommission *PoolDecommissionInfo `json:"decommissionInfo,omitempty" msg:"dec"`
}
//go:generate msgp -file $GOFILE -unexported
type poolMeta struct {
Version int `msg:"v"`
Pools []PoolStatus `msg:"pls"`
}
// returnResumablePools returns the pools whose decommission is worth
// resuming upon restart of a cluster.
func (p *poolMeta) returnResumablePools() []PoolStatus {
var newPools []PoolStatus
for _, pool := range p.Pools {
if pool.Decommission == nil {
continue
}
if pool.Decommission.Complete || pool.Decommission.Canceled {
// Do not resume decommission upon startup for
// - decommission complete
// - decommission canceled
continue
} // In all other situations we need to resume
newPools = append(newPools, pool)
}
return newPools
}
func (p *poolMeta) DecommissionComplete(idx int) bool {
if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Complete {
p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.Complete = true
p.Pools[idx].Decommission.Failed = false
p.Pools[idx].Decommission.Canceled = false
return true
}
return false
}
func (p *poolMeta) DecommissionFailed(idx int) bool {
if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Failed {
p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.StartTime = time.Time{}
p.Pools[idx].Decommission.Complete = false
p.Pools[idx].Decommission.Failed = true
p.Pools[idx].Decommission.Canceled = false
return true
}
return false
}
func (p *poolMeta) DecommissionCancel(idx int) bool {
if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Canceled {
p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.StartTime = time.Time{}
p.Pools[idx].Decommission.Complete = false
p.Pools[idx].Decommission.Failed = false
p.Pools[idx].Decommission.Canceled = true
return true
}
return false
}
func (p poolMeta) isBucketDecommissioned(idx int, bucket string) bool {
return p.Pools[idx].Decommission.isBucketDecommissioned(bucket)
}
func (p *poolMeta) BucketDone(idx int, bucket decomBucketInfo) {
if p.Pools[idx].Decommission == nil {
// Decommission not in progress.
return
}
p.Pools[idx].Decommission.bucketPop(bucket.String())
}
func (p poolMeta) ResumeBucketObject(idx int) (bucket, object string) {
if p.Pools[idx].Decommission != nil {
bucket = p.Pools[idx].Decommission.Bucket
object = p.Pools[idx].Decommission.Object
}
return
}
func (p *poolMeta) TrackCurrentBucketObject(idx int, bucket string, object string) {
if p.Pools[idx].Decommission == nil {
// Decommission not in progress.
return
}
p.Pools[idx].Decommission.Bucket = bucket
p.Pools[idx].Decommission.Object = object
}
func (p *poolMeta) PendingBuckets(idx int) []decomBucketInfo {
if p.Pools[idx].Decommission == nil {
// Decommission not in progress.
return nil
}
decomBuckets := make([]decomBucketInfo, len(p.Pools[idx].Decommission.QueuedBuckets))
for i := range decomBuckets {
bucket, prefix := path2BucketObject(p.Pools[idx].Decommission.QueuedBuckets[i])
decomBuckets[i] = decomBucketInfo{
Name: bucket,
Prefix: prefix,
}
}
return decomBuckets
}
//msgp:ignore decomBucketInfo
type decomBucketInfo struct {
Name string
Prefix string
}
func (db decomBucketInfo) String() string {
return pathJoin(db.Name, db.Prefix)
}
func (p *poolMeta) QueueBuckets(idx int, buckets []decomBucketInfo) {
// add new queued buckets
for _, bucket := range buckets {
p.Pools[idx].Decommission.bucketPush(bucket)
}
}
var (
errDecommissionAlreadyRunning = errors.New("decommission is already in progress")
errDecommissionComplete = errors.New("decommission is complete, please remove the servers from command-line")
)
func (p *poolMeta) Decommission(idx int, pi poolSpaceInfo) error {
	// Return an error when there is a decommission ongoing - the user needs
	// to explicitly cancel it first in order to restart decommissioning again.
if p.Pools[idx].Decommission != nil &&
!p.Pools[idx].Decommission.Complete &&
!p.Pools[idx].Decommission.Failed &&
!p.Pools[idx].Decommission.Canceled {
return errDecommissionAlreadyRunning
}
now := UTCNow()
p.Pools[idx].LastUpdate = now
p.Pools[idx].Decommission = &PoolDecommissionInfo{
StartTime: now,
StartSize: pi.Free,
CurrentSize: pi.Free,
TotalSize: pi.Total,
}
return nil
}
func (p poolMeta) IsSuspended(idx int) bool {
return p.Pools[idx].Decommission != nil
}
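// validate reconciles the pools remembered in pool.bin with the pools
// specified on the server command line. It reports whether the on-disk
// poolMeta needs updating, and returns an error if an already
// decommissioned pool is still present on the command line.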
func (p *poolMeta) validate(pools []*erasureSets) (bool, error) {
type poolInfo struct {
position int
completed bool
decomStarted bool // started but not finished yet
}
rememberedPools := make(map[string]poolInfo)
for idx, pool := range p.Pools {
complete := false
decomStarted := false
if pool.Decommission != nil {
if pool.Decommission.Complete {
complete = true
}
decomStarted = true
}
rememberedPools[pool.CmdLine] = poolInfo{
position: idx,
completed: complete,
decomStarted: decomStarted,
}
}
specifiedPools := make(map[string]int)
for idx, pool := range pools {
specifiedPools[pool.endpoints.CmdLine] = idx
}
var update bool
// Check if specified pools need to be removed from decommissioned pool.
for k := range specifiedPools {
pi, ok := rememberedPools[k]
if !ok {
			// we no longer have the pool that we previously remembered; since all
			// the CLI checks out, we can allow updates as we are mostly adding a pool here.
update = true
}
if ok && pi.completed {
return false, fmt.Errorf("pool(%s) = %s is decommissioned, please remove from server command line", humanize.Ordinal(pi.position+1), k)
}
}
if len(specifiedPools) == len(rememberedPools) {
for k, pi := range rememberedPools {
pos, ok := specifiedPools[k]
if ok && pos != pi.position {
				update = true // pool order is changing, it's okay to allow it.
}
}
}
if !update {
update = len(specifiedPools) != len(rememberedPools)
}
return update, nil
}
func (p *poolMeta) load(ctx context.Context, pool *erasureSets, pools []*erasureSets) error {
data, err := readConfig(ctx, pool, poolMetaName)
if err != nil {
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
return nil
}
return err
}
if len(data) == 0 {
		// Seems to be empty, create a new poolMeta object.
return nil
}
if len(data) <= 4 {
return fmt.Errorf("poolMeta: no data")
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case poolMetaFormat:
default:
return fmt.Errorf("poolMeta: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case poolMetaVersion:
default:
return fmt.Errorf("poolMeta: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
if _, err = p.UnmarshalMsg(data[4:]); err != nil {
return err
}
switch p.Version {
case poolMetaVersionV1:
default:
return fmt.Errorf("unexpected pool meta version: %d", p.Version)
}
return nil
}
func (p *poolMeta) CountItem(idx int, size int64, failed bool) {
pd := p.Pools[idx].Decommission
if pd != nil {
if failed {
pd.ItemsDecommissionFailed++
pd.BytesFailed += size
} else {
pd.ItemsDecommissioned++
pd.BytesDone += size
}
p.Pools[idx].Decommission = pd
}
}
func (p *poolMeta) updateAfter(ctx context.Context, idx int, pools []*erasureSets, duration time.Duration) (bool, error) {
if p.Pools[idx].Decommission == nil {
return false, errInvalidArgument
}
now := UTCNow()
if now.Sub(p.Pools[idx].LastUpdate) >= duration {
if serverDebugLog {
console.Debugf("decommission: persisting poolMeta on drive: threshold:%s, poolMeta:%#v\n", now.Sub(p.Pools[idx].LastUpdate), p.Pools[idx])
}
p.Pools[idx].LastUpdate = now
if err := p.save(ctx, pools); err != nil {
return false, err
}
return true, nil
}
return false, nil
}
func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error {
data := make([]byte, 4, p.Msgsize()+4)
// Initialize the header.
binary.LittleEndian.PutUint16(data[0:2], poolMetaFormat)
binary.LittleEndian.PutUint16(data[2:4], poolMetaVersion)
buf, err := p.MarshalMsg(data)
if err != nil {
return err
}
// Saves on all pools to make sure decommissioning of first pool is allowed.
for i, eset := range pools {
if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil {
if !errors.Is(err, context.Canceled) {
logger.LogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err))
}
return err
}
}
return nil
}
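// On-disk layout of pool.bin, as written by save above and parsed by load
// earlier in this file (a summary for readers, not additional behavior):
//
//	[0:2] little-endian uint16 - poolMetaFormat
//	[2:4] little-endian uint16 - poolMetaVersion
//	[4:]  msgp-encoded poolMeta payload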
const (
poolMetaName = "pool.bin"
poolMetaFormat = 1
poolMetaVersionV1 = 1
poolMetaVersion = poolMetaVersionV1
)
// Init() initializes pools and saves additional information about them
// in 'pool.bin', this is eventually used for decommissioning the pool.
func (z *erasureServerPools) Init(ctx context.Context) error {
// Load rebalance metadata if present
err := z.loadRebalanceMeta(ctx)
if err != nil {
return fmt.Errorf("failed to load rebalance data: %w", err)
}
// Start rebalance routine
z.StartRebalance()
meta := poolMeta{}
if err := meta.load(ctx, z.serverPools[0], z.serverPools); err != nil {
return err
}
update, err := meta.validate(z.serverPools)
if err != nil {
return err
}
// if no update is needed return right away.
if !update {
z.poolMeta = meta
} else {
newMeta := poolMeta{} // to update write poolMeta fresh.
// looks like new pool was added we need to update,
// or this is a fresh installation (or an existing
// installation with pool removed)
newMeta.Version = poolMetaVersion
for idx, pool := range z.serverPools {
var skip bool
for _, currentPool := range meta.Pools {
// Preserve any current pool status.
if currentPool.CmdLine == pool.endpoints.CmdLine {
newMeta.Pools = append(newMeta.Pools, currentPool)
skip = true
break
}
}
if skip {
continue
}
newMeta.Pools = append(newMeta.Pools, PoolStatus{
CmdLine: pool.endpoints.CmdLine,
ID: idx,
LastUpdate: UTCNow(),
})
}
if err = newMeta.save(ctx, z.serverPools); err != nil {
return err
}
z.poolMeta = newMeta
}
pools := meta.returnResumablePools()
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
idx := globalEndpoints.GetPoolIdx(pool.CmdLine)
if idx == -1 {
return fmt.Errorf("unexpected state present for decommission status pool(%s) not found", pool.CmdLine)
}
poolIndices = append(poolIndices, idx)
}
if len(poolIndices) > 0 && globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
go func() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for {
if err := z.Decommission(ctx, poolIndices...); err != nil {
if errors.Is(err, errDecommissionAlreadyRunning) {
						// A previous decommission run was found, restart it.
for _, idx := range poolIndices {
z.doDecommissionInRoutine(ctx, idx)
}
return
}
if configRetriableErrors(err) {
logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err))
time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second)))
continue
}
logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err))
return
}
}
}()
}
return nil
}
func (z *erasureServerPools) IsDecommissionRunning() bool {
z.poolMetaMutex.RLock()
defer z.poolMetaMutex.RUnlock()
meta := z.poolMeta
for _, pool := range meta.Pools {
if pool.Decommission != nil &&
!pool.Decommission.Complete &&
!pool.Decommission.Failed &&
!pool.Decommission.Canceled {
return true
}
}
return false
}
func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket string, gr *GetObjectReader) (err error) {
objInfo := gr.ObjInfo
defer func() {
gr.Close()
auditLogDecom(ctx, "DecomCopyData", objInfo.Bucket, objInfo.Name, objInfo.VersionID, err)
}()
actualSize, err := objInfo.GetActualSize()
if err != nil {
return err
}
if objInfo.isMultipart() {
res, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{
VersionID: objInfo.VersionID,
UserDefined: objInfo.UserDefined,
})
if err != nil {
return fmt.Errorf("decommissionObject: NewMultipartUpload() %w", err)
}
defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
parts := make([]CompletePart, len(objInfo.Parts))
for i, part := range objInfo.Parts {
hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
if err != nil {
return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
}
pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
part.Number,
NewPutObjReader(hr),
ObjectOptions{
PreserveETag: part.ETag, // Preserve original ETag to ensure same metadata.
IndexCB: func() []byte {
return part.Index // Preserve part Index to ensure decompression works.
},
})
if err != nil {
return fmt.Errorf("decommissionObject: PutObjectPart() %w", err)
}
parts[i] = CompletePart{
ETag: pi.ETag,
PartNumber: pi.PartNumber,
ChecksumCRC32: pi.ChecksumCRC32,
ChecksumCRC32C: pi.ChecksumCRC32C,
ChecksumSHA256: pi.ChecksumSHA256,
ChecksumSHA1: pi.ChecksumSHA1,
}
}
_, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, parts, ObjectOptions{
MTime: objInfo.ModTime,
})
if err != nil {
err = fmt.Errorf("decommissionObject: CompleteMultipartUpload() %w", err)
}
return err
}
hr, err := hash.NewReader(io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize)
if err != nil {
return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
}
_, err = z.PutObject(ctx,
bucket,
objInfo.Name,
NewPutObjReader(hr),
ObjectOptions{
VersionID: objInfo.VersionID,
MTime: objInfo.ModTime,
UserDefined: objInfo.UserDefined,
PreserveETag: objInfo.ETag, // Preserve original ETag to ensure same metadata.
IndexCB: func() []byte {
return objInfo.Parts[0].Index // Preserve part Index to ensure decompression works.
},
})
if err != nil {
err = fmt.Errorf("decommissionObject: PutObject() %w", err)
}
return err
}
// versionsSorter sorts FileInfo slices by version.
//
//msgp:ignore versionsSorter
type versionsSorter []FileInfo
func (v versionsSorter) reverse() {
sort.Slice(v, func(i, j int) bool {
return v[i].ModTime.Before(v[j].ModTime)
})
}
func (set *erasureObjects) listObjectsToDecommission(ctx context.Context, bi decomBucketInfo, fn func(entry metaCacheEntry)) error {
disks := set.getOnlineDisks()
if len(disks) == 0 {
return fmt.Errorf("no online drives found for set with endpoints %s", set.getEndpoints())
}
// How to resolve partial results.
resolver := metadataResolutionParams{
dirQuorum: len(disks) / 2, // make sure to capture all quorum ratios
objQuorum: len(disks) / 2, // make sure to capture all quorum ratios
bucket: bi.Name,
}
err := listPathRaw(ctx, listPathRawOptions{
disks: disks,
bucket: bi.Name,
path: bi.Prefix,
recursive: true,
forwardTo: "",
minDisks: len(disks) / 2, // to capture all quorum ratios
reportNotFound: false,
agreed: fn,
partial: func(entries metaCacheEntries, _ []error) {
entry, ok := entries.resolve(&resolver)
if ok {
fn(*entry)
}
},
finished: nil,
})
return err
}
func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool *erasureSets, bi decomBucketInfo) error {
ctx = logger.SetReqInfo(ctx, &logger.ReqInfo{})
wStr := env.Get("_MINIO_DECOMMISSION_WORKERS", strconv.Itoa(len(pool.sets)))
workerSize, err := strconv.Atoi(wStr)
if err != nil {
return err
}
	// each set gets its own thread separate from the concurrent
	// objects/versions being decommissioned.
workerSize += len(pool.sets)
wk, err := workers.New(workerSize)
if err != nil {
return err
}
vc, _ := globalBucketVersioningSys.Get(bi.Name)
// Check if the current bucket has a configured lifecycle policy
lc, _ := globalLifecycleSys.Get(bi.Name)
// Check if bucket is object locked.
lr, _ := globalBucketObjectLockSys.Get(bi.Name)
for _, set := range pool.sets {
set := set
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
}
versioned := vc != nil && vc.Versioned(object)
objInfo := fi.ToObjectInfo(bucket, object, versioned)
evt := evalActionFromLifecycle(ctx, *lc, lr, objInfo)
switch {
			case evt.Action.DeleteRestored(): // if the restored copy has expired, delete it synchronously
applyExpiryOnTransitionedObject(ctx, z, objInfo, evt, lcEventSrc_Decom)
return false
case evt.Action.Delete():
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Decom)
return true
default:
return false
}
}
decommissionEntry := func(entry metaCacheEntry) {
defer wk.Give()
if entry.isDir() {
return
}
fivs, err := entry.fileInfoVersions(bi.Name)
if err != nil {
return
}
// We need a reversed order for decommissioning,
// to create the appropriate stack.
versionsSorter(fivs.Versions).reverse()
var decommissioned, expired int
for _, version := range fivs.Versions {
stopFn := globalDecommissionMetrics.log(decomMetricDecommissionObject, idx, bi.Name, version.Name, version.VersionID)
// Apply lifecycle rules on the objects that are expired.
if filterLifecycle(bi.Name, version.Name, version) {
expired++
decommissioned++
stopFn(errors.New("ILM expired object/version will be skipped"))
continue
}
				// any object with only a single DELETE marker does not need
				// to be decommissioned, just skip it; this also includes
				// any other versions that have already expired.
remainingVersions := len(fivs.Versions) - expired
if version.Deleted && remainingVersions == 1 {
decommissioned++
stopFn(errors.New("DELETE marked object with no other non-current versions will be skipped"))
continue
}
versionID := version.VersionID
if versionID == "" {
versionID = nullVersionID
}
if version.Deleted {
_, err := z.DeleteObject(ctx,
bi.Name,
version.Name,
ObjectOptions{
// Since we are preserving a delete marker, we have to make sure this is always true.
// regardless of the current configuration of the bucket we must preserve all versions
// on the pool being decommissioned.
Versioned: true,
VersionID: versionID,
MTime: version.ModTime,
DeleteReplication: version.ReplicationState,
DeleteMarker: true, // make sure we create a delete marker
SkipDecommissioned: true, // make sure we skip the decommissioned pool
})
var failure bool
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
err = nil
}
}
stopFn(err)
if err != nil {
logger.LogIf(ctx, err)
failure = true
}
z.poolMetaMutex.Lock()
z.poolMeta.CountItem(idx, 0, failure)
z.poolMetaMutex.Unlock()
if !failure {
// Success keep a count.
decommissioned++
}
continue
}
var failure, ignore bool
// gr.Close() is ensured by decommissionObject().
for try := 0; try < 3; try++ {
if version.IsRemote() {
if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{
VersionID: versionID,
MTime: version.ModTime,
UserDefined: version.Metadata,
}); err != nil {
stopFn(err)
failure = true
logger.LogIf(ctx, err)
continue
}
stopFn(nil)
failure = false
break
}
gr, err := set.GetObjectNInfo(ctx,
bi.Name,
encodeDirObject(version.Name),
nil,
http.Header{},
ObjectOptions{
VersionID: versionID,
NoDecryption: true,
NoLock: true,
})
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
// object deleted by the application, nothing to do here we move on.
ignore = true
stopFn(nil)
break
}
if err != nil && !ignore {
// if usage-cache.bin is not readable log and ignore it.
if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) {
ignore = true
stopFn(err)
logger.LogIf(ctx, err)
break
}
}
if err != nil {
failure = true
logger.LogIf(ctx, err)
stopFn(err)
continue
}
if err = z.decommissionObject(ctx, bi.Name, gr); err != nil {
stopFn(err)
failure = true
logger.LogIf(ctx, err)
continue
}
stopFn(nil)
failure = false
break
}
if ignore {
continue
}
z.poolMetaMutex.Lock()
z.poolMeta.CountItem(idx, version.Size, failure)
z.poolMetaMutex.Unlock()
if failure {
break // break out on first error
}
decommissioned++
}
// if all versions were decommissioned, then we can delete the object versions.
if decommissioned == len(fivs.Versions) {
stopFn := globalDecommissionMetrics.log(decomMetricDecommissionRemoveObject, idx, bi.Name, entry.name)
_, err := set.DeleteObject(ctx,
bi.Name,
encodeDirObject(entry.name),
ObjectOptions{
DeletePrefix: true, // use prefix delete to delete all versions at once.
},
)
stopFn(err)
auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err)
if err != nil {
logger.LogIf(ctx, err)
}
}
z.poolMetaMutex.Lock()
z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name)
ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second)
logger.LogIf(ctx, err)
if ok {
globalNotificationSys.ReloadPoolMeta(ctx)
}
z.poolMetaMutex.Unlock()
}
wk.Take()
go func() {
defer wk.Give()
err := set.listObjectsToDecommission(ctx, bi,
func(entry metaCacheEntry) {
wk.Take()
go decommissionEntry(entry)
},
)
logger.LogIf(ctx, err)
}()
}
wk.Wait()
return nil
}
//msgp:ignore decomMetrics
type decomMetrics struct{}
var globalDecommissionMetrics decomMetrics
//msgp:ignore decomMetric
//go:generate stringer -type=decomMetric -trimprefix=decomMetric $GOFILE
type decomMetric uint8
const (
decomMetricDecommissionBucket decomMetric = iota
decomMetricDecommissionObject
decomMetricDecommissionRemoveObject
)
func decomTrace(d decomMetric, poolIdx int, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo {
var errStr string
if err != nil {
errStr = err.Error()
}
return madmin.TraceInfo{
TraceType: madmin.TraceDecommission,
Time: startTime,
NodeName: globalLocalNodeName,
FuncName: fmt.Sprintf("decommission.%s (pool-id=%d)", d.String(), poolIdx),
Duration: duration,
Path: path,
Error: errStr,
}
}
func (m *decomMetrics) log(d decomMetric, poolIdx int, paths ...string) func(err error) {
startTime := time.Now()
return func(err error) {
duration := time.Since(startTime)
if globalTrace.NumSubscribers(madmin.TraceDecommission) > 0 {
globalTrace.Publish(decomTrace(d, poolIdx, startTime, duration, strings.Join(paths, " "), err))
}
}
}
func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx int) error {
pool := z.serverPools[idx]
for _, bucket := range z.poolMeta.PendingBuckets(idx) {
if z.poolMeta.isBucketDecommissioned(idx, bucket.String()) {
if serverDebugLog {
console.Debugln("decommission: already done, moving on", bucket)
}
z.poolMetaMutex.Lock()
z.poolMeta.BucketDone(idx, bucket) // remove from pendingBuckets and persist.
z.poolMeta.save(ctx, z.serverPools)
z.poolMetaMutex.Unlock()
continue
}
if serverDebugLog {
console.Debugln("decommission: currently on bucket", bucket.Name)
}
stopFn := globalDecommissionMetrics.log(decomMetricDecommissionBucket, idx, bucket.Name)
if err := z.decommissionPool(ctx, idx, pool, bucket); err != nil {
stopFn(err)
return err
}
stopFn(nil)
z.poolMetaMutex.Lock()
z.poolMeta.BucketDone(idx, bucket)
z.poolMeta.save(ctx, z.serverPools)
z.poolMetaMutex.Unlock()
}
return nil
}
func (z *erasureServerPools) checkAfterDecom(ctx context.Context, idx int) error {
buckets, err := z.getBucketsToDecommission(ctx)
if err != nil {
return err
}
pool := z.serverPools[idx]
for _, set := range pool.sets {
for _, bi := range buckets {
vc, _ := globalBucketVersioningSys.Get(bi.Name)
// Check if the current bucket has a configured lifecycle policy
lc, _ := globalLifecycleSys.Get(bi.Name)
// Check if bucket is object locked.
lr, _ := globalBucketObjectLockSys.Get(bi.Name)
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
}
versioned := vc != nil && vc.Versioned(object)
objInfo := fi.ToObjectInfo(bucket, object, versioned)
evt := evalActionFromLifecycle(ctx, *lc, lr, objInfo)
switch {
				case evt.Action.DeleteRestored(): // if the restored copy has expired, delete it synchronously
applyExpiryOnTransitionedObject(ctx, z, objInfo, evt, lcEventSrc_Decom)
return false
case evt.Action.Delete():
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Decom)
return true
default:
return false
}
}
var versionsFound int
err := set.listObjectsToDecommission(ctx, bi, func(entry metaCacheEntry) {
if !entry.isObject() {
return
}
fivs, err := entry.fileInfoVersions(bi.Name)
if err != nil {
return
}
// We need a reversed order for decommissioning,
// to create the appropriate stack.
versionsSorter(fivs.Versions).reverse()
for _, version := range fivs.Versions {
// Apply lifecycle rules on the objects that are expired.
if filterLifecycle(bi.Name, version.Name, version) {
continue
}
					// `.usage-cache.bin` still exists, it must not be readable, ignore it.
if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) {
// skipping bucket usage cache name, as its autogenerated.
continue
}
versionsFound++
}
})
if err != nil {
return err
}
if versionsFound > 0 {
return fmt.Errorf("at least %d object(s)/version(s) were found in bucket `%s` after decommissioning", versionsFound, bi.Name)
}
}
}
return nil
}
func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx int) {
z.poolMetaMutex.Lock()
var dctx context.Context
dctx, z.decommissionCancelers[idx] = context.WithCancel(GlobalContext)
z.poolMetaMutex.Unlock()
// Generate an empty request info so it can be directly modified later by audit
dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{})
if err := z.decommissionInBackground(dctx, idx); err != nil {
logger.LogIf(GlobalContext, err)
logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
return
}
z.poolMetaMutex.Lock()
failed := z.poolMeta.Pools[idx].Decommission.ItemsDecommissionFailed > 0 || contextCanceled(dctx)
poolCmdLine := z.poolMeta.Pools[idx].CmdLine
z.poolMetaMutex.Unlock()
if !failed {
logger.Info("Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
err := z.checkAfterDecom(dctx, idx)
if err != nil {
logger.LogIf(ctx, err)
failed = true
}
}
if failed {
// Decommission failed indicate as such.
logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
} else {
		// Complete the decommission.
logger.LogIf(GlobalContext, z.CompleteDecommission(dctx, idx))
}
}
func (z *erasureServerPools) IsSuspended(idx int) bool {
z.poolMetaMutex.RLock()
defer z.poolMetaMutex.RUnlock()
return z.poolMeta.IsSuspended(idx)
}
// Decommission - start decommission session.
func (z *erasureServerPools) Decommission(ctx context.Context, indices ...int) error {
if len(indices) == 0 {
return errInvalidArgument
}
if z.SinglePool() {
return errInvalidArgument
}
// Make pool unwritable before decommissioning.
if err := z.StartDecommission(ctx, indices...); err != nil {
return err
}
go func() {
for _, idx := range indices {
// decommission all pools serially one after
// the other.
z.doDecommissionInRoutine(ctx, idx)
}
}()
// Successfully started decommissioning.
return nil
}
type decomError struct {
Err string
}
func (d decomError) Error() string {
return d.Err
}
type poolSpaceInfo struct {
Free int64
Total int64
Used int64
}
func (z *erasureServerPools) getDecommissionPoolSpaceInfo(idx int) (pi poolSpaceInfo, err error) {
if idx < 0 {
return pi, errInvalidArgument
}
if idx+1 > len(z.serverPools) {
return pi, errInvalidArgument
}
info := z.serverPools[idx].StorageInfo(context.Background())
info.Backend = z.BackendInfo()
usableTotal := int64(GetTotalUsableCapacity(info.Disks, info))
usableFree := int64(GetTotalUsableCapacityFree(info.Disks, info))
return poolSpaceInfo{
Total: usableTotal,
Free: usableFree,
Used: usableTotal - usableFree,
}, nil
}
func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, error) {
if idx < 0 {
return PoolStatus{}, errInvalidArgument
}
z.poolMetaMutex.RLock()
defer z.poolMetaMutex.RUnlock()
pi, err := z.getDecommissionPoolSpaceInfo(idx)
if err != nil {
return PoolStatus{}, err
}
poolInfo := z.poolMeta.Pools[idx]
if poolInfo.Decommission != nil {
poolInfo.Decommission.TotalSize = pi.Total
poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
} else {
poolInfo.Decommission = &PoolDecommissionInfo{
TotalSize: pi.Total,
CurrentSize: pi.Free,
}
}
return poolInfo, nil
}
func (z *erasureServerPools) ReloadPoolMeta(ctx context.Context) (err error) {
meta := poolMeta{}
if err = meta.load(ctx, z.serverPools[0], z.serverPools); err != nil {
return err
}
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
z.poolMeta = meta
return nil
}
func (z *erasureServerPools) DecommissionCancel(ctx context.Context, idx int) (err error) {
if idx < 0 {
return errInvalidArgument
}
if z.SinglePool() {
return errInvalidArgument
}
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
if z.poolMeta.DecommissionCancel(idx) {
if fn := z.decommissionCancelers[idx]; fn != nil {
defer fn() // cancel any active thread.
}
if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
return err
}
globalNotificationSys.ReloadPoolMeta(ctx)
}
return nil
}
func (z *erasureServerPools) DecommissionFailed(ctx context.Context, idx int) (err error) {
if idx < 0 {
return errInvalidArgument
}
if z.SinglePool() {
return errInvalidArgument
}
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
if z.poolMeta.DecommissionFailed(idx) {
if fn := z.decommissionCancelers[idx]; fn != nil {
defer fn() // cancel any active thread.
}
if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
return err
}
globalNotificationSys.ReloadPoolMeta(ctx)
}
return nil
}
func (z *erasureServerPools) CompleteDecommission(ctx context.Context, idx int) (err error) {
if idx < 0 {
return errInvalidArgument
}
if z.SinglePool() {
return errInvalidArgument
}
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
if z.poolMeta.DecommissionComplete(idx) {
if fn := z.decommissionCancelers[idx]; fn != nil {
defer fn() // cancel any active thread.
}
if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
return err
}
globalNotificationSys.ReloadPoolMeta(ctx)
}
return nil
}
func (z *erasureServerPools) getBucketsToDecommission(ctx context.Context) ([]decomBucketInfo, error) {
buckets, err := z.ListBuckets(ctx, BucketOptions{})
if err != nil {
return nil, err
}
decomBuckets := make([]decomBucketInfo, len(buckets))
for i := range buckets {
decomBuckets[i] = decomBucketInfo{
Name: buckets[i].Name,
}
}
// Buckets data are dispersed in multiple zones/sets, make
// sure to decommission the necessary metadata.
decomBuckets = append(decomBuckets, decomBucketInfo{
Name: minioMetaBucket,
Prefix: minioConfigPrefix,
})
decomBuckets = append(decomBuckets, decomBucketInfo{
Name: minioMetaBucket,
Prefix: bucketMetaPrefix,
})
return decomBuckets, nil
}
func (z *erasureServerPools) StartDecommission(ctx context.Context, indices ...int) (err error) {
if len(indices) == 0 {
return errInvalidArgument
}
if z.SinglePool() {
return errInvalidArgument
}
decomBuckets, err := z.getBucketsToDecommission(ctx)
if err != nil {
return err
}
for _, bucket := range decomBuckets {
z.HealBucket(ctx, bucket.Name, madmin.HealOpts{})
}
// Create .minio.sys/config, .minio.sys/buckets paths if missing,
// this code is present to avoid any missing meta buckets on other
// pools.
for _, metaBucket := range []string{
pathJoin(minioMetaBucket, minioConfigPrefix),
pathJoin(minioMetaBucket, bucketMetaPrefix),
} {
var bucketExists BucketExists
if err = z.MakeBucket(ctx, metaBucket, MakeBucketOptions{}); err != nil {
if !errors.As(err, &bucketExists) {
return err
}
}
}
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
for _, idx := range indices {
pi, err := z.getDecommissionPoolSpaceInfo(idx)
if err != nil {
return err
}
if err = z.poolMeta.Decommission(idx, pi); err != nil {
return err
}
z.poolMeta.QueueBuckets(idx, decomBuckets)
}
if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
return err
}
globalNotificationSys.ReloadPoolMeta(ctx)
return nil
}
func auditLogDecom(ctx context.Context, apiName, bucket, object, versionID string, err error) {
errStr := ""
if err != nil {
errStr = err.Error()
}
auditLogInternal(ctx, AuditLogOptions{
Event: "decommission",
APIName: apiName,
Bucket: bucket,
Object: object,
VersionID: versionID,
Error: errStr,
})
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"io"
"net/http"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/iam/policy"
)
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
//
// GET <admin-prefix>/idp/ldap/policy-entities?[query-params]
//
// Query params:
//
// user=... -> repeatable query parameter, specifying users to query for
// policy mapping
//
// group=... -> repeatable query parameter, specifying groups to query for
// policy mapping
//
// policy=... -> repeatable query parameter, specifying policy to query for
// user/group mapping
//
// When all query parameters are omitted, returns mappings for all policies.
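//
// Example request (hypothetical values, for illustration only):
//
//	GET <admin-prefix>/idp/ldap/policy-entities?policy=readonly&group=cn=ops,dc=example,dc=org
//
// which would return the users/groups mapped to the "readonly" policy and
// the policies mapped to the given group DN.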
func (a adminAPIHandlers) ListLDAPPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListLDAPPolicyMappingEntities")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r,
iampolicy.ListGroupsAdminAction, iampolicy.ListUsersAdminAction, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
// Validate API arguments.
q := madmin.PolicyEntitiesQuery{
Users: r.Form["user"],
Groups: r.Form["group"],
Policy: r.Form["policy"],
}
// Query IAM
res, err := globalIAMSys.QueryLDAPPolicyEntities(r.Context(), q)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Encode result and send response.
data, err := json.Marshal(res)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
	password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// AttachDetachPolicyLDAP attaches or detaches policies from an LDAP entity
// (user or group).
//
// POST <admin-prefix>/idp/ldap/policy/{operation}
func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AttachDetachPolicyLDAP")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Check authorization.
objectAPI, cred := validateAdminReq(ctx, w, r, iampolicy.UpdatePolicyAssociationAction)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxEConfigJSONSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
// Ensure body content type is opaque to ensure that request body has not
// been interpreted as form data.
contentType := r.Header.Get("Content-Type")
if contentType != "application/octet-stream" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
// Validate operation
operation := mux.Vars(r)["operation"]
if operation != "attach" && operation != "detach" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
isAttach := operation == "attach"
// Validate API arguments in body.
	password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var par madmin.PolicyAssociationReq
err = json.Unmarshal(reqBytes, &par)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := par.IsValid(); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
// Call IAM subsystem
updatedAt, addedOrRemoved, _, err := globalIAMSys.PolicyDBUpdateLDAP(ctx, isAttach, par)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
respBody := madmin.PolicyAssociationResp{
UpdatedAt: updatedAt,
}
if isAttach {
respBody.PoliciesAttached = addedOrRemoved
} else {
respBody.PoliciesDetached = addedOrRemoved
}
data, err := json.Marshal(respBody)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"time"
)
var sharedLockTimeout = newDynamicTimeoutWithOpts(dynamicTimeoutOpts{
timeout: 30 * time.Second,
minimum: 10 * time.Second,
retryInterval: time.Minute,
})
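// sharedLock hands out a continuously refreshed namespace lock:
// backgroundRoutine keeps re-acquiring the lock, and GetLock returns a
// context tied to both the caller's context and the lock's context.
// A minimal usage sketch based on the functions below (the lock name is
// illustrative):
//
//	sl := newSharedLock(ctx, objAPI, "leader.lock")
//	lctx, cancel := sl.GetLock(ctx)
//	defer cancel()
//	// ... work here is valid only while lctx is not canceled ...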
type sharedLock struct {
lockContext chan LockContext
}
func (ld sharedLock) backgroundRoutine(ctx context.Context, objAPI ObjectLayer, lockName string) {
for {
locker := objAPI.NewNSLock(minioMetaBucket, lockName)
lkctx, err := locker.GetLock(ctx, sharedLockTimeout)
if err != nil {
continue
}
keepLock:
for {
select {
case <-ctx.Done():
return
case <-lkctx.Context().Done():
// The context of the lock is canceled, this can happen
// if one lock lost quorum due to cluster instability
// in that case, try to lock again.
break keepLock
case ld.lockContext <- lkctx:
// Send the lock context to anyone asking for it
}
}
}
}
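// mergeContext returns a context that is canceled as soon as either ctx1 or
// ctx2 is done, or when the returned cancel function is called.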
func mergeContext(ctx1, ctx2 context.Context) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-ctx1.Done():
case <-ctx2.Done():
// The lock acquirer decides to cancel, exit this goroutine
case <-ctx.Done():
}
cancel()
}()
return ctx, cancel
}
func (ld sharedLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
l := <-ld.lockContext
return mergeContext(l.Context(), ctx)
}
func newSharedLock(ctx context.Context, objAPI ObjectLayer, lockName string) *sharedLock {
l := &sharedLock{
lockContext: make(chan LockContext),
}
go l.backgroundRoutine(ctx, objAPI, lockName)
return l
}
<file_sep># Fan-Out Uploads [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
## Overview
MinIO implements an S3 extension to perform multiple concurrent fan-out upload operations. A typical use case is fanning out incoming TSBs (Time Shift Buffers). TSBs are a method of facilitating time-shifted playback of television signals and media content.
MinIO implements an S3 extension to the [PostUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html) API wherein a special fan-out list is sent along with the TSB, allowing MinIO to make multiple uploads from a single source stream. Custom metadata, tags and other retention settings are optionally supported. Once an upload completes, every object is also readable independently via the regular S3 [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) API.
## How to enable Fan-Out Uploads?
Fan-Out uploads are automatically enabled when the `x-minio-fanout-list` form-field is provided with the PostUpload API. To keep things simple, higher-level APIs are provided in our SDKs, for example in the `minio-go` SDK:
```go
PutObjectFanOut(ctx context.Context, bucket string, fanOutContent io.Reader, fanOutReq minio.PutObjectFanOutRequest) ([]minio.PutObjectFanOutResponse, error)
```
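Below is a minimal sketch of calling this API from the `minio-go` SDK; the endpoint, credentials, bucket name, object keys, and the `Entries`/`Key` fields of the fan-out request are illustrative assumptions rather than details taken from the text above. Each element of the returned slice corresponds to one fan-out entry.
```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Endpoint and credentials are placeholders for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4(os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	// Single source stream that is fanned out to every entry below.
	src, err := os.Open("segment.ts")
	if err != nil {
		log.Fatalln(err)
	}
	defer src.Close()
	// Field names (Entries, Key) assume the minio-go fan-out request type.
	resp, err := client.PutObjectFanOut(context.Background(), "tsb-bucket", src, minio.PutObjectFanOutRequest{
		Entries: []minio.PutObjectFanOutEntry{
			{Key: "channel-1/segment.ts"},
			{Key: "channel-2/segment.ts"},
		},
	})
	if err != nil {
		log.Fatalln(err)
	}
	for _, r := range resp {
		log.Printf("fan-out result: %+v", r)
	}
}
```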
<file_sep>// Copyright (c) 2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"sync"
"sync/atomic"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/pkg/randreader"
)
// SpeedTestResult return value of the speedtest function
type SpeedTestResult struct {
Endpoint string
Uploads uint64
Downloads uint64
UploadTimes madmin.TimeDurations
DownloadTimes madmin.TimeDurations
DownloadTTFB madmin.TimeDurations
Error string
}
func newRandomReader(size int) io.Reader {
return io.LimitReader(randreader.New(), int64(size))
}
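// firstByteRecorder wraps a reader and records the time at which the first
// byte is read; selfSpeedTest uses it to measure download time-to-first-byte.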
type firstByteRecorder struct {
t *time.Time
r io.Reader
}
func (f *firstByteRecorder) Read(p []byte) (n int, err error) {
if f.t != nil || len(p) == 0 {
return f.r.Read(p)
}
// Read a single byte.
n, err = f.r.Read(p[:1])
if n > 0 {
t := time.Now()
f.t = &t
}
return n, err
}
// Runs the speedtest on local MinIO process.
func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, error) {
objAPI := newObjectLayerFn()
if objAPI == nil {
return SpeedTestResult{}, errServerNotInitialized
}
var wg sync.WaitGroup
var errOnce sync.Once
var retError string
var totalBytesWritten uint64
var totalBytesRead uint64
objCountPerThread := make([]uint64, opts.concurrency)
uploadsCtx, uploadsCancel := context.WithCancel(context.Background())
defer uploadsCancel()
go func() {
time.Sleep(opts.duration)
uploadsCancel()
}()
objNamePrefix := pathJoin(speedTest, mustGetUUID())
userMetadata := make(map[string]string)
userMetadata[globalObjectPerfUserMetadata] = "true" // Bypass S3 API freeze
popts := minio.PutObjectOptions{
UserMetadata: userMetadata,
DisableContentSha256: true,
DisableMultipart: true,
}
var mu sync.Mutex
var uploadTimes madmin.TimeDurations
wg.Add(opts.concurrency)
for i := 0; i < opts.concurrency; i++ {
go func(i int) {
defer wg.Done()
for {
t := time.Now()
reader := newRandomReader(opts.objectSize)
tmpObjName := pathJoin(objNamePrefix, fmt.Sprintf("%d/%d", i, objCountPerThread[i]))
info, err := globalMinioClient.PutObject(uploadsCtx, opts.bucketName, tmpObjName, reader, int64(opts.objectSize), popts)
if err != nil {
if !contextCanceled(uploadsCtx) && !errors.Is(err, context.Canceled) {
errOnce.Do(func() {
retError = err.Error()
})
}
uploadsCancel()
return
}
response := time.Since(t)
atomic.AddUint64(&totalBytesWritten, uint64(info.Size))
objCountPerThread[i]++
mu.Lock()
uploadTimes = append(uploadTimes, response)
mu.Unlock()
}
}(i)
}
wg.Wait()
	// We already saw write failures, no need to proceed into reads
if retError != "" {
return SpeedTestResult{
Uploads: totalBytesWritten,
Downloads: totalBytesRead,
UploadTimes: uploadTimes,
Error: retError,
}, nil
}
downloadsCtx, downloadsCancel := context.WithCancel(context.Background())
defer downloadsCancel()
go func() {
time.Sleep(opts.duration)
downloadsCancel()
}()
gopts := minio.GetObjectOptions{}
gopts.Set(globalObjectPerfUserMetadata, "true") // Bypass S3 API freeze
var downloadTimes madmin.TimeDurations
var downloadTTFB madmin.TimeDurations
wg.Add(opts.concurrency)
for i := 0; i < opts.concurrency; i++ {
go func(i int) {
defer wg.Done()
var j uint64
if objCountPerThread[i] == 0 {
return
}
for {
if objCountPerThread[i] == j {
j = 0
}
tmpObjName := pathJoin(objNamePrefix, fmt.Sprintf("%d/%d", i, j))
t := time.Now()
r, err := globalMinioClient.GetObject(downloadsCtx, opts.bucketName, tmpObjName, gopts)
if err != nil {
errResp, ok := err.(minio.ErrorResponse)
if ok && errResp.StatusCode == http.StatusNotFound {
continue
}
if !contextCanceled(downloadsCtx) && !errors.Is(err, context.Canceled) {
errOnce.Do(func() {
retError = err.Error()
})
}
downloadsCancel()
return
}
fbr := firstByteRecorder{
r: r,
}
n, err := io.Copy(io.Discard, &fbr)
r.Close()
if err == nil {
response := time.Since(t)
ttfb := time.Since(*fbr.t)
// Only capture success criteria - do not
// have to capture failed reads, truncated
// reads etc.
atomic.AddUint64(&totalBytesRead, uint64(n))
mu.Lock()
downloadTimes = append(downloadTimes, response)
downloadTTFB = append(downloadTTFB, ttfb)
mu.Unlock()
}
if err != nil {
if !contextCanceled(downloadsCtx) && !errors.Is(err, context.Canceled) {
errOnce.Do(func() {
retError = err.Error()
})
}
downloadsCancel()
return
}
j++
}
}(i)
}
wg.Wait()
return SpeedTestResult{
Uploads: totalBytesWritten,
Downloads: totalBytesRead,
UploadTimes: uploadTimes,
DownloadTimes: downloadTimes,
DownloadTTFB: downloadTTFB,
Error: retError,
}, nil
}
// To collect RX stats during "mc support perf net"
// RXSample holds the RX bytes for the duration between
// the last peer to connect and the first peer to disconnect.
// This is to improve the RX throughput accuracy.
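// Effective RX throughput is computed in netperf() below as RXSample divided
// by the (firstToDisconnect - lastToConnect) window.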
type netPerfRX struct {
RX uint64 // RX bytes
lastToConnect time.Time // time at which last peer to connect to us
firstToDisconnect time.Time // time at which the first peer disconnects from us
RXSample uint64 // RX bytes between lastToConnect and firstToDisconnect
activeConnections uint64
sync.RWMutex
}
func (n *netPerfRX) Connect() {
n.Lock()
defer n.Unlock()
n.activeConnections++
atomic.StoreUint64(&globalNetPerfRX.RX, 0)
n.lastToConnect = time.Now()
}
func (n *netPerfRX) Disconnect() {
n.Lock()
defer n.Unlock()
n.activeConnections--
if n.firstToDisconnect.IsZero() {
n.RXSample = atomic.LoadUint64(&n.RX)
n.firstToDisconnect = time.Now()
}
}
func (n *netPerfRX) ActiveConnections() uint64 {
n.RLock()
defer n.RUnlock()
return n.activeConnections
}
func (n *netPerfRX) Reset() {
n.Lock()
defer n.Unlock()
n.RX = 0
n.RXSample = 0
n.lastToConnect = time.Time{}
n.firstToDisconnect = time.Time{}
}
// Reader to read random data.
type netperfReader struct {
n uint64
eof chan struct{}
buf []byte
}
func (m *netperfReader) Read(b []byte) (int, error) {
select {
case <-m.eof:
return 0, io.EOF
default:
}
n := copy(b, m.buf)
atomic.AddUint64(&m.n, uint64(n))
return n, nil
}
func netperf(ctx context.Context, duration time.Duration) madmin.NetperfNodeResult {
r := &netperfReader{eof: make(chan struct{})}
r.buf = make([]byte, 128*humanize.KiByte)
rand.Read(r.buf)
connectionsPerPeer := 16
if len(globalNotificationSys.peerClients) > 16 {
// For a large cluster it's enough to have 1 connection per peer to saturate the network.
connectionsPerPeer = 1
}
errStr := ""
var wg sync.WaitGroup
for index := range globalNotificationSys.peerClients {
if globalNotificationSys.peerClients[index] == nil {
continue
}
go func(index int) {
for i := 0; i < connectionsPerPeer; i++ {
wg.Add(1)
go func() {
defer wg.Done()
err := globalNotificationSys.peerClients[index].DevNull(ctx, r)
if err != nil {
errStr = err.Error()
}
}()
}
}(index)
}
time.Sleep(duration)
close(r.eof)
wg.Wait()
for {
if globalNetPerfRX.ActiveConnections() == 0 {
break
}
time.Sleep(time.Second)
}
rx := float64(globalNetPerfRX.RXSample)
delta := globalNetPerfRX.firstToDisconnect.Sub(globalNetPerfRX.lastToConnect)
if delta < 0 {
rx = 0
errStr = "network disconnection issues detected"
}
globalNetPerfRX.Reset()
return madmin.NetperfNodeResult{Endpoint: "", TX: r.n / uint64(duration.Seconds()), RX: uint64(rx / delta.Seconds()), Error: errStr}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:build !race
// +build !race
// Tests in this file are not run under the `-race` flag as they are too slow
// and cause context deadline errors.
package cmd
import (
"context"
"fmt"
"runtime"
"testing"
"time"
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/sync/errgroup"
)
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
suite.SetUpSuite(c)
suite.TestDeleteUserRace(c)
suite.TearDownSuite(c)
}
func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
if runtime.GOOS == globalWindowsOSName {
t.Skip("windows is clunky")
}
baseTestCases := []TestSuiteCommon{
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
{serverType: "ErasureSet", signer: signerV4},
}
testCases := []*TestSuiteIAM{}
for _, bt := range baseTestCases {
testCases = append(testCases,
newTestSuiteIAM(bt, false),
newTestSuiteIAM(bt, true),
)
}
for i, testCase := range testCases {
etcdStr := ""
if testCase.withEtcdBackend {
etcdStr = " (with etcd backend)"
}
t.Run(
fmt.Sprintf("Test: %d, ServerType: %s%s", i+1, testCase.serverType, etcdStr),
func(t *testing.T) {
runAllIAMConcurrencyTests(testCase, &check{t, testCase.serverType})
},
)
}
}
func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
defer cancel()
bucket := getRandomBucketName()
err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket creat error: %v", err)
}
	// Create a policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
}
userCount := 50
accessKeys := make([]string, userCount)
secretKeys := make([]string, userCount)
for i := 0; i < userCount; i++ {
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
accessKeys[i] = accessKey
secretKeys[i] = secretKey
}
g := errgroup.Group{}
for i := 0; i < userCount; i++ {
g.Go(func(i int) func() error {
return func() error {
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
err := s.adm.RemoveUser(ctx, accessKeys[i])
if err != nil {
return err
}
c.mustNotListObjects(ctx, uClient, bucket)
return nil
}
}(i), i)
}
if errs := g.Wait(); len(errs) > 0 {
c.Fatalf("unable to remove users: %v", errs)
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package kms
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"strings"
"sync"
"github.com/minio/kes-go"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/env"
)
const (
tlsClientSessionCacheSize = 100
)
// Config contains various KMS-related configuration
// parameters - like KMS endpoints or authentication
// credentials.
type Config struct {
// Endpoints contains a list of KMS server
// HTTP endpoints.
Endpoints []string
// Enclave is the KES server enclave. If empty,
// none resp. the default KES server enclave
// will be used.
Enclave string
// DefaultKeyID is the key ID used when
// no explicit key ID is specified for
// a cryptographic operation.
DefaultKeyID string
	// APIKey is a credential provided by env. var.
// to authenticate to a KES server. Either an
// API key or a client certificate must be specified.
APIKey kes.APIKey
// Certificate is the client TLS certificate
// to authenticate to KMS via mTLS.
Certificate *certs.Certificate
// ReloadCertEvents is an event channel that receives
// the reloaded client certificate.
ReloadCertEvents <-chan tls.Certificate
// RootCAs is a set of root CA certificates
// to verify the KMS server TLS certificate.
RootCAs *x509.CertPool
}
// NewWithConfig returns a new KMS using the given
// configuration.
func NewWithConfig(config Config) (KMS, error) {
if len(config.Endpoints) == 0 {
return nil, errors.New("kms: no server endpoints")
}
	endpoints := make([]string, len(config.Endpoints)) // Copy => avoid being affected by any changes to the original slice
copy(endpoints, config.Endpoints)
var client *kes.Client
if config.APIKey != nil {
cert, err := kes.GenerateCertificate(config.APIKey)
if err != nil {
return nil, err
}
client = kes.NewClientWithConfig("", &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: []tls.Certificate{cert},
RootCAs: config.RootCAs,
ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize),
})
} else {
client = kes.NewClientWithConfig("", &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: []tls.Certificate{config.Certificate.Get()},
RootCAs: config.RootCAs,
ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize),
})
}
client.Endpoints = endpoints
var bulkAvailable bool
_, policy, err := client.DescribeSelf(context.Background())
if err == nil {
const BulkAPI = "/v1/key/bulk/decrypt/"
for _, allow := range policy.Allow {
if strings.HasPrefix(allow, BulkAPI) {
bulkAvailable = true
break
}
}
}
c := &kesClient{
client: client,
enclave: client.Enclave(config.Enclave),
defaultKeyID: config.DefaultKeyID,
bulkAvailable: bulkAvailable,
}
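// Watch for reloaded client certificates and swap the underlying KES client
// whenever a certificate different from the previous one is received.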
go func() {
if config.Certificate == nil || config.ReloadCertEvents == nil {
return
}
var prevCertificate tls.Certificate
for {
certificate, ok := <-config.ReloadCertEvents
if !ok {
return
}
sameCert := len(certificate.Certificate) == len(prevCertificate.Certificate)
for i, b := range certificate.Certificate {
if !sameCert {
break
}
sameCert = sameCert && bytes.Equal(b, prevCertificate.Certificate[i])
}
// Do not reload if it's the same cert as before.
if !sameCert {
client := kes.NewClientWithConfig("", &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: []tls.Certificate{certificate},
RootCAs: config.RootCAs,
ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize),
})
client.Endpoints = endpoints
c.lock.Lock()
c.client = client
c.enclave = c.client.Enclave(config.Enclave)
c.lock.Unlock()
prevCertificate = certificate
}
}
}()
return c, nil
}
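// kesClient is a KMS implementation backed by a KES server; all cryptographic
// and policy operations are performed within the configured enclave.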
type kesClient struct {
lock sync.RWMutex
defaultKeyID string
client *kes.Client
enclave *kes.Enclave
bulkAvailable bool
}
var _ KMS = (*kesClient)(nil) // compiler check
// Stat returns the current KES status containing a
// list of KES endpoints and the default key ID.
func (c *kesClient) Stat(ctx context.Context) (Status, error) {
c.lock.RLock()
defer c.lock.RUnlock()
st, err := c.client.Status(ctx)
if err != nil {
return Status{}, err
}
endpoints := make([]string, len(c.client.Endpoints))
copy(endpoints, c.client.Endpoints)
return Status{
Name: "KES",
Endpoints: endpoints,
DefaultKey: c.defaultKeyID,
Details: st,
}, nil
}
// IsLocal returns true if the KMS is a local implementation
func (c *kesClient) IsLocal() bool {
return env.IsSet(EnvKMSSecretKey)
}
// List returns an array of local KMS Names
func (c *kesClient) List() []kes.KeyInfo {
var kmsSecret []kes.KeyInfo
envKMSSecretKey := env.Get(EnvKMSSecretKey, "")
values := strings.SplitN(envKMSSecretKey, ":", 2)
if len(values) == 2 {
kmsSecret = []kes.KeyInfo{
{
Name: values[0],
},
}
}
return kmsSecret
}
// Metrics retrieves server metrics in the Prometheus exposition format.
func (c *kesClient) Metrics(ctx context.Context) (kes.Metric, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.client.Metrics(ctx)
}
// Version retrieves version information
func (c *kesClient) Version(ctx context.Context) (string, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.client.Version(ctx)
}
// APIs retrieves a list of supported API endpoints
func (c *kesClient) APIs(ctx context.Context) ([]kes.API, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.client.APIs(ctx)
}
// CreateKey tries to create a new key at the KMS with the
// given key ID.
//
// If a key with the same keyID already exists, then
// CreateKey returns kes.ErrKeyExists.
func (c *kesClient) CreateKey(ctx context.Context, keyID string) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.CreateKey(ctx, keyID)
}
// DeleteKey deletes a key at the KMS with the given key ID.
// Please note that this is a dangerous operation.
// Once a key has been deleted, all data that has been encrypted with it can no
// longer be decrypted and is therefore lost.
func (c *kesClient) DeleteKey(ctx context.Context, keyID string) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DeleteKey(ctx, keyID)
}
// ListKeys List all key names that match the specified pattern. In particular,
// the pattern * lists all keys.
func (c *kesClient) ListKeys(ctx context.Context, pattern string) (*kes.KeyIterator, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.ListKeys(ctx, pattern)
}
// GenerateKey generates a new data encryption key using
// the key at the KES server referenced by the key ID.
//
// The default key ID will be used if keyID is empty.
//
// The context is associated and tied to the generated DEK.
// The same context must be provided when the generated
// key should be decrypted.
func (c *kesClient) GenerateKey(ctx context.Context, keyID string, cryptoCtx Context) (DEK, error) {
c.lock.RLock()
defer c.lock.RUnlock()
if keyID == "" {
keyID = c.defaultKeyID
}
ctxBytes, err := cryptoCtx.MarshalText()
if err != nil {
return DEK{}, err
}
dek, err := c.enclave.GenerateKey(ctx, keyID, ctxBytes)
if err != nil {
return DEK{}, err
}
return DEK{
KeyID: keyID,
Plaintext: dek.Plaintext,
Ciphertext: dek.Ciphertext,
}, nil
}
// ImportKey imports a cryptographic key into the KMS.
func (c *kesClient) ImportKey(ctx context.Context, keyID string, bytes []byte) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.ImportKey(ctx, keyID, bytes)
}
// EncryptKey encrypts and authenticates a (small) plaintext with the cryptographic key.
// The plaintext must not exceed 1 MB.
func (c *kesClient) EncryptKey(keyID string, plaintext []byte, ctx Context) ([]byte, error) {
c.lock.RLock()
defer c.lock.RUnlock()
ctxBytes, err := ctx.MarshalText()
if err != nil {
return nil, err
}
return c.enclave.Encrypt(context.Background(), keyID, plaintext, ctxBytes)
}
// DecryptKey decrypts the ciphertext with the key at the KES
// server referenced by the key ID. The context must match the
// context value used to generate the ciphertext.
func (c *kesClient) DecryptKey(keyID string, ciphertext []byte, ctx Context) ([]byte, error) {
c.lock.RLock()
defer c.lock.RUnlock()
ctxBytes, err := ctx.MarshalText()
if err != nil {
return nil, err
}
return c.enclave.Decrypt(context.Background(), keyID, ciphertext, ctxBytes)
}
func (c *kesClient) DecryptAll(ctx context.Context, keyID string, ciphertexts [][]byte, contexts []Context) ([][]byte, error) {
c.lock.RLock()
defer c.lock.RUnlock()
if c.bulkAvailable {
CCPs := make([]kes.CCP, 0, len(ciphertexts))
for i := range ciphertexts {
bCtx, err := contexts[i].MarshalText()
if err != nil {
return nil, err
}
CCPs = append(CCPs, kes.CCP{
Ciphertext: ciphertexts[i],
Context: bCtx,
})
}
PCPs, err := c.enclave.DecryptAll(ctx, keyID, CCPs...)
if err != nil {
return nil, err
}
plaintexts := make([][]byte, 0, len(PCPs))
for _, p := range PCPs {
plaintexts = append(plaintexts, p.Plaintext)
}
return plaintexts, nil
}
plaintexts := make([][]byte, 0, len(ciphertexts))
for i := range ciphertexts {
ctxBytes, err := contexts[i].MarshalText()
if err != nil {
return nil, err
}
plaintext, err := c.enclave.Decrypt(ctx, keyID, ciphertexts[i], ctxBytes)
if err != nil {
return nil, err
}
plaintexts = append(plaintexts, plaintext)
}
return plaintexts, nil
}
// DescribePolicy describes a policy by returning its metadata.
// e.g. who created the policy at which point in time.
func (c *kesClient) DescribePolicy(ctx context.Context, policy string) (*kes.PolicyInfo, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DescribePolicy(ctx, policy)
}
// AssignPolicy assigns a policy to an identity.
// An identity can have at most one policy while the same policy can be assigned to multiple identities.
// The assigned policy defines which API calls this identity can perform.
// It's not possible to assign a policy to the admin identity.
// Further, an identity cannot assign a policy to itself.
func (c *kesClient) AssignPolicy(ctx context.Context, policy, identity string) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.AssignPolicy(ctx, policy, kes.Identity(identity))
}
// DeletePolicy deletes a policy from KMS.
// All identities that have been assigned to this policy will lose all authorization privileges.
func (c *kesClient) DeletePolicy(ctx context.Context, policy string) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DeletePolicy(ctx, policy)
}
// ListPolicies list all policy metadata that match the specified pattern.
// In particular, the pattern * lists all policy metadata.
func (c *kesClient) ListPolicies(ctx context.Context, pattern string) (*kes.PolicyIterator, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.ListPolicies(ctx, pattern)
}
// SetPolicy creates or updates a policy.
func (c *kesClient) SetPolicy(ctx context.Context, policy string, policyItem *kes.Policy) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.SetPolicy(ctx, policy, policyItem)
}
// GetPolicy gets a policy from KMS.
func (c *kesClient) GetPolicy(ctx context.Context, policy string) (*kes.Policy, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.GetPolicy(ctx, policy)
}
// DescribeIdentity describes an identity by returning its metadata.
// e.g. which policy is currently assigned and whether it's an admin identity.
func (c *kesClient) DescribeIdentity(ctx context.Context, identity string) (*kes.IdentityInfo, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DescribeIdentity(ctx, kes.Identity(identity))
}
// DescribeSelfIdentity describes the identity issuing the request.
// It infers the identity from the TLS client certificate used to authenticate.
// It returns the identity and policy information for the client identity.
func (c *kesClient) DescribeSelfIdentity(ctx context.Context) (*kes.IdentityInfo, *kes.Policy, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DescribeSelf(ctx)
}
// DeleteIdentity deletes an identity from KMS.
// The client certificate that corresponds to the identity is no longer authorized to perform any API operations.
// The admin identity cannot be deleted.
func (c *kesClient) DeleteIdentity(ctx context.Context, identity string) error {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.DeleteIdentity(ctx, kes.Identity(identity))
}
// ListIdentities list all identity metadata that match the specified pattern.
// In particular, the pattern * lists all identity metadata.
func (c *kesClient) ListIdentities(ctx context.Context, pattern string) (*kes.IdentityIterator, error) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.enclave.ListIdentities(ctx, pattern)
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import "net/http"
// Identity represents the access key that caused the event.
type Identity struct {
Type string `json:"type"`
PrincipalID string `json:"principalId"`
AccessKeyID string `json:"accessKeyId"`
}
// UserRequest contains the user request URL and headers.
type UserRequest struct {
URL string `json:"url"`
Headers http.Header `json:"headers"`
}
// GetObjectContext provides the necessary details to perform
// download of the object, and return back the processed response
// to the server.
type GetObjectContext struct {
OutputRoute string `json:"outputRoute"`
OutputToken string `json:"outputToken"`
InputS3URL string `json:"inputS3Url"`
}
// Event represents a lambda function event; this is undocumented in AWS S3. This
// structure is modeled on the example below, but there is no formal binding.
//
// {
// "xAmzRequestId": "a2871150-1df5-4dc9-ad9f-3da283ca1bf3",
// "getObjectContext": {
// "outputRoute": "...",
// "outputToken": "...",
// "inputS3Url": "<presignedURL>"
// },
// "configuration": { // not useful in MinIO
// "accessPointArn": "...",
// "supportingAccessPointArn": "...",
// "payload": ""
// },
// "userRequest": {
// "url": "...",
// "headers": {
// "Host": "...",
// "X-Amz-Content-SHA256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
// }
// },
// "userIdentity": {
// "type": "IAMUser",
// "principalId": "AIDAJF5MO57RFXQCE5ZNC",
// "arn": "...",
// "accountId": "...",
// "accessKeyId": "<KEY>"
// },
// "protocolVersion": "1.00"
// }
type Event struct {
ProtocolVersion string `json:"protocolVersion"`
GetObjectContext *GetObjectContext `json:"getObjectContext"`
UserIdentity Identity `json:"userIdentity"`
UserRequest UserRequest `json:"userRequest"`
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package parquet
import (
"errors"
"io"
"time"
"github.com/bcicen/jstream"
parquetgo "github.com/fraugster/parquet-go"
parquettypes "github.com/fraugster/parquet-go/parquet"
jsonfmt "github.com/minio/minio/internal/s3select/json"
"github.com/minio/minio/internal/s3select/sql"
)
// Reader implements reading records from parquet input.
type Reader struct {
io.Closer
r *parquetgo.FileReader
}
// NewParquetReader creates a Reader from an io.ReadSeekCloser.
func NewParquetReader(rsc io.ReadSeekCloser, _ *ReaderArgs) (r *Reader, err error) {
fr, err := parquetgo.NewFileReader(rsc)
if err != nil {
return nil, errParquetParsingError(err)
}
return &Reader{Closer: rsc, r: fr}, nil
}
func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) {
nextRow, err := pr.r.NextRow()
if err != nil {
if err == io.EOF {
return nil, err
}
return nil, errParquetParsingError(err)
}
kvs := jstream.KVS{}
for _, col := range pr.r.Columns() {
var value interface{}
if v, ok := nextRow[col.FlatName()]; ok {
value, err = convertFromAnnotation(col.Element(), v)
if err != nil {
return nil, errParquetParsingError(err)
}
}
kvs = append(kvs, jstream.KV{Key: col.FlatName(), Value: value})
}
// Reuse destination if we can.
dstRec, ok := dst.(*jsonfmt.Record)
if !ok {
dstRec = &jsonfmt.Record{}
}
dstRec.SelectFormat = sql.SelectFmtParquet
dstRec.KVS = kvs
return dstRec, nil
}
// convertFromAnnotation - converts values based on the Parquet column's type
// annotations. LogicalType annotations if present override the deprecated
// ConvertedType annotations. Ref:
// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md
func convertFromAnnotation(se *parquettypes.SchemaElement, v interface{}) (interface{}, error) {
if se == nil {
return v, nil
}
var value interface{}
switch val := v.(type) {
case []byte:
// TODO: only strings are supported in s3select output (not
// binary arrays) - perhaps we need to check the annotation to
// ensure it's UTF8 encoded.
value = string(val)
case [12]byte:
// TODO: This is returned for the parquet INT96 type. We just
// treat it the same as []byte (but AWS S3 treats it as a large int)
// - fix this later.
value = string(val[:])
case int32:
value = int64(val)
if logicalType := se.GetLogicalType(); logicalType != nil {
if logicalType.IsSetDATE() {
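// Parquet DATE values count days since the Unix epoch; convert to seconds.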
value = sql.FormatSQLTimestamp(time.Unix(60*60*24*int64(val), 0).UTC())
}
} else if se.GetConvertedType() == parquettypes.ConvertedType_DATE {
value = sql.FormatSQLTimestamp(time.Unix(60*60*24*int64(val), 0).UTC())
}
case int64:
value = val
if logicalType := se.GetLogicalType(); logicalType != nil {
if ts := logicalType.GetTIMESTAMP(); ts != nil {
var duration time.Duration
// Only support UTC normalized timestamps.
if ts.IsAdjustedToUTC {
switch {
case ts.Unit.IsSetNANOS():
duration = time.Duration(val) * time.Nanosecond
case ts.Unit.IsSetMILLIS():
duration = time.Duration(val) * time.Millisecond
case ts.Unit.IsSetMICROS():
duration = time.Duration(val) * time.Microsecond
default:
return nil, errors.New("Invalid LogicalType annotation found")
}
value = sql.FormatSQLTimestamp(time.Unix(0, 0).Add(duration))
}
} else if se.GetConvertedType() == parquettypes.ConvertedType_TIMESTAMP_MILLIS {
duration := time.Duration(val) * time.Millisecond
value = sql.FormatSQLTimestamp(time.Unix(0, 0).Add(duration))
} else if se.GetConvertedType() == parquettypes.ConvertedType_TIMESTAMP_MICROS {
duration := time.Duration(val) * time.Microsecond
value = sql.FormatSQLTimestamp(time.Unix(0, 0).Add(duration))
}
}
case float32:
value = float64(val)
default:
value = v
}
return value, nil
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/minio/madmin-go/v3"
)
// GetTotalCapacity gets the total capacity in the cluster.
func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) {
for _, disk := range diskInfo {
capacity += disk.TotalSpace
}
return
}
// GetTotalUsableCapacity gets the total usable capacity in the cluster.
func GetTotalUsableCapacity(diskInfo []madmin.Disk, s StorageInfo) (capacity uint64) {
for _, disk := range diskInfo {
// Ignore invalid.
if disk.PoolIndex < 0 || len(s.Backend.StandardSCData) <= disk.PoolIndex {
// https://github.com/minio/minio/issues/16500
continue
}
// Ignore parity disks
if disk.DiskIndex < s.Backend.StandardSCData[disk.PoolIndex] {
capacity += disk.TotalSpace
}
}
return
}
// GetTotalCapacityFree gets the total capacity free in the cluster.
func GetTotalCapacityFree(diskInfo []madmin.Disk) (capacity uint64) {
for _, d := range diskInfo {
capacity += d.AvailableSpace
}
return
}
// GetTotalUsableCapacityFree gets the total usable capacity free in the cluster.
func GetTotalUsableCapacityFree(diskInfo []madmin.Disk, s StorageInfo) (capacity uint64) {
for _, disk := range diskInfo {
// Ignore invalid.
if disk.PoolIndex < 0 || len(s.Backend.StandardSCData) <= disk.PoolIndex {
// https://github.com/minio/minio/issues/16500
continue
}
// Ignore parity disks
if disk.DiskIndex < s.Backend.StandardSCData[disk.PoolIndex] {
capacity += disk.AvailableSpace
}
}
return
}
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package openid
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
jwtgo "github.com/golang-jwt/jwt/v4"
"github.com/minio/minio/internal/arn"
"github.com/minio/minio/internal/auth"
iampolicy "github.com/minio/pkg/iam/policy"
xnet "github.com/minio/pkg/net"
)
type publicKeys struct {
*sync.RWMutex
// map of kid to public key
pkMap map[string]interface{}
}
func (pk *publicKeys) parseAndAdd(b io.Reader) error {
var jwk JWKS
err := json.NewDecoder(b).Decode(&jwk)
if err != nil {
return err
}
for _, key := range jwk.Keys {
pkey, err := key.DecodePublicKey()
if err != nil {
return err
}
pk.add(key.Kid, pkey)
}
return nil
}
func (pk *publicKeys) add(keyID string, key interface{}) {
pk.Lock()
defer pk.Unlock()
pk.pkMap[keyID] = key
}
func (pk *publicKeys) get(kid string) interface{} {
pk.RLock()
defer pk.RUnlock()
return pk.pkMap[kid]
}
// PopulatePublicKey - populates a new public key from the JWKS URL.
func (r *Config) PopulatePublicKey(arn arn.ARN) error {
pCfg := r.arnProviderCfgsMap[arn]
if pCfg.JWKS.URL == nil || pCfg.JWKS.URL.String() == "" {
return nil
}
// Add client secret for the client ID for HMAC based signature.
r.pubKeys.add(pCfg.ClientID, []byte(pCfg.ClientSecret))
client := &http.Client{
Transport: r.transport,
}
resp, err := client.Get(pCfg.JWKS.URL.String())
if err != nil {
return err
}
defer r.closeRespFn(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
return r.pubKeys.parseAndAdd(resp.Body)
}
// ErrTokenExpired - error token expired
var (
ErrTokenExpired = errors.New("token expired")
)
func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error {
expStr := claims["exp"]
if expStr == "" {
return ErrTokenExpired
}
// No custom duration requested, the claims can be used as is.
if dsecs == "" {
return nil
}
expAt, err := auth.ExpToInt64(expStr)
if err != nil {
return err
}
defaultExpiryDuration, err := GetDefaultExpiration(dsecs)
if err != nil {
return err
}
// Verify if the JWT expiry is less than the default expiry duration;
// if that is the case then set the default expiration to be
// from the JWT expiry claim.
if time.Unix(expAt, 0).UTC().Sub(time.Now().UTC()) < defaultExpiryDuration {
defaultExpiryDuration = time.Unix(expAt, 0).UTC().Sub(time.Now().UTC())
} // else honor the specified expiry duration.
claims["exp"] = time.Now().UTC().Add(defaultExpiryDuration).Unix() // update with new expiry.
return nil
}
const (
audClaim = "aud"
azpClaim = "azp"
)
// Validate - validates the id_token.
func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims jwtgo.MapClaims) error {
jp := new(jwtgo.Parser)
jp.ValidMethods = []string{
"RS256", "RS384", "RS512",
"ES256", "ES384", "ES512",
"HS256", "HS384", "HS512",
"RS3256", "RS3384", "RS3512",
"ES3256", "ES3384", "ES3512",
}
keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
kid, ok := jwtToken.Header["kid"].(string)
if !ok {
return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"])
}
return r.pubKeys.get(kid), nil
}
pCfg, ok := r.arnProviderCfgsMap[arn]
if !ok {
return fmt.Errorf("Role %s does not exist", arn)
}
jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
if err != nil {
// Re-populate the public key in-case the JWKS
// pubkeys are refreshed
if err = r.PopulatePublicKey(arn); err != nil {
return err
}
jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
if err != nil {
return err
}
}
if !jwtToken.Valid {
return ErrTokenExpired
}
if err = updateClaimsExpiry(dsecs, claims); err != nil {
return err
}
if err = r.updateUserinfoClaims(ctx, arn, accessToken, claims); err != nil {
return err
}
// Validate that matching clientID appears in the aud or azp claims.
// REQUIRED. Audience(s) that this ID Token is intended for.
// It MUST contain the OAuth 2.0 client_id of the Relying Party
// as an audience value. It MAY also contain identifiers for
// other audiences. In the general case, the aud value is an
// array of case sensitive strings. In the common special case
// when there is one audience, the aud value MAY be a single
// case sensitive string.
audValues, ok := iampolicy.GetValuesFromClaims(claims, audClaim)
if !ok {
return errors.New("STS JWT Token has `aud` claim invalid, `aud` must match configured OpenID Client ID")
}
if !audValues.Contains(pCfg.ClientID) {
// if audience claims is missing, look for "azp" claims.
// OPTIONAL. Authorized party - the party to which the ID
// Token was issued. If present, it MUST contain the OAuth
// 2.0 Client ID of this party. This Claim is only needed
// when the ID Token has a single audience value and that
// audience is different than the authorized party. It MAY
// be included even when the authorized party is the same
// as the sole audience. The azp value is a case sensitive
// string containing a StringOrURI value
azpValues, ok := iampolicy.GetValuesFromClaims(claims, azpClaim)
if !ok {
return errors.New("STS JWT Token has `azp` claim invalid, `azp` must match configured OpenID Client ID")
}
if !azpValues.Contains(pCfg.ClientID) {
return errors.New("STS JWT Token has `azp` claim invalid, `azp` must match configured OpenID Client ID")
}
}
return nil
}
func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]interface{}) error {
pCfg, ok := r.arnProviderCfgsMap[arn]
// If claim user info is enabled, get claims from userInfo
// and overwrite them with the claims from JWT.
if ok && pCfg.ClaimUserinfo {
if accessToken == "" {
return errors.New("access_token is mandatory if user_info claim is enabled")
}
uclaims, err := pCfg.UserInfo(ctx, accessToken, r.transport)
if err != nil {
return err
}
for k, v := range uclaims {
if _, ok := claims[k]; !ok { // only add to claims not update it.
claims[k] = v
}
}
}
return nil
}
// DiscoveryDoc - parses the output from openid-configuration
// for example https://accounts.google.com/.well-known/openid-configuration
type DiscoveryDoc struct {
Issuer string `json:"issuer,omitempty"`
AuthEndpoint string `json:"authorization_endpoint,omitempty"`
TokenEndpoint string `json:"token_endpoint,omitempty"`
EndSessionEndpoint string `json:"end_session_endpoint,omitempty"`
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
RevocationEndpoint string `json:"revocation_endpoint,omitempty"`
JwksURI string `json:"jwks_uri,omitempty"`
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"`
ScopesSupported []string `json:"scopes_supported,omitempty"`
TokenEndpointAuthMethods []string `json:"token_endpoint_auth_methods_supported,omitempty"`
ClaimsSupported []string `json:"claims_supported,omitempty"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported,omitempty"`
}
func parseDiscoveryDoc(u *xnet.URL, transport http.RoundTripper, closeRespFn func(io.ReadCloser)) (DiscoveryDoc, error) {
d := DiscoveryDoc{}
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return d, err
}
clnt := http.Client{
Transport: transport,
}
resp, err := clnt.Do(req)
if err != nil {
return d, err
}
defer closeRespFn(resp.Body)
if resp.StatusCode != http.StatusOK {
return d, fmt.Errorf("unexpected error returned by %s : status(%s)", u, resp.Status)
}
dec := json.NewDecoder(resp.Body)
if err = dec.Decode(&d); err != nil {
return d, err
}
return d, nil
}
<file_sep>#!/bin/sh
set -e ; # Have script exit in the event of a failed command.
{{- if .Values.configPathmc }}
MC_CONFIG_DIR="{{ .Values.configPathmc }}"
MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}"
{{- else }}
MC="/usr/bin/mc --insecure"
{{- end }}
# The access key and secret key are written to a credentials file to prevent shell execution errors caused by special characters.
# Special characters, for example: ',",<,>,{,}
MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_svcacct_tmp"
# connectToMinio
# Use a check-sleep-check loop to wait for MinIO service to be available
connectToMinio() {
SCHEME=$1
ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts
set -e ; # fail if we can't read the keys.
ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ;
set +e ; # The connections to minio are allowed to fail.
echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ;
MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ;
$MC_COMMAND ;
STATUS=$? ;
until [ $STATUS = 0 ]
do
ATTEMPTS=`expr $ATTEMPTS + 1` ;
echo \"Failed attempts: $ATTEMPTS\" ;
if [ $ATTEMPTS -gt $LIMIT ]; then
exit 1 ;
fi ;
sleep 2 ; # 2 second intervals between attempts
$MC_COMMAND ;
STATUS=$? ;
done ;
set -e ; # reset `e` as active
return 0
}
# checkSvcacctExists ()
# Check if the svcacct exists, by using the exit code of `mc admin user svcacct info`
checkSvcacctExists() {
CMD=$(${MC} admin user svcacct info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1)
return $?
}
# createSvcacct ($user)
createSvcacct () {
USER=$1
FILENAME=$2
#check accessKey_and_secretKey_tmp file
if [[ ! -f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then
echo "credentials file does not exist"
return 1
fi
if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then
echo "credentials file is invalid"
rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP
return 1
fi
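# The first line of the credentials file holds the access key, the second line the secret key.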
SVCACCT=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP)
# Create the svcacct if it does not exist
if ! checkSvcacctExists ; then
echo "Creating svcacct '$SVCACCT'"
# Check if a policy file is defined
if [ -z $FILENAME ]; then
${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) myminio $USER
else
${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --policy /config/$FILENAME.json myminio $USER
fi
else
echo "Svcacct '$SVCACCT' already exists."
fi
#clean up credentials files.
rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP
}
# Try connecting to MinIO instance
{{- if .Values.tls.enabled }}
scheme=https
{{- else }}
scheme=http
{{- end }}
connectToMinio $scheme
{{ if .Values.svcaccts }}
{{ $global := . }}
# Create the svcaccts
{{- range $idx, $svc := .Values.svcaccts }}
echo {{ tpl .accessKey $global }} > $MINIO_ACCESSKEY_SECRETKEY_TMP
{{- if .existingSecret }}
cat /config/secrets-svc/{{ tpl .existingSecret $global }}/{{ tpl .existingSecretKey $global }} >> $MINIO_ACCESSKEY_SECRETKEY_TMP
# Add a trailing newline if it doesn't exist
sed -i '$a\' $MINIO_ACCESSKEY_SECRETKEY_TMP
{{ else }}
echo {{ .secretKey }} >> $MINIO_ACCESSKEY_SECRETKEY_TMP
{{- end }}
{{- if $svc.policy}}
createSvcacct {{ .user }} svc_policy_{{ $idx }}
{{ else }}
createSvcacct {{ .user }}
{{- end }}
{{- end }}
{{- end }}
<file_sep>#!/bin/bash
set -x
export MINIO_CI_CD=1
killall -9 minio
rm -rf ${HOME}/tmp/dist
scheme="http"
nr_servers=4
addr="localhost"
args=""
for ((i = 0; i < $((nr_servers)); i++)); do
args="$args $scheme://$addr:$((9100 + i))/${HOME}/tmp/dist/path1/$i"
done
echo $args
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
sleep 10s
if [ ! -f ./mc ]; then
wget --quiet -O ./mc https://dl.minio.io/client/mc/release/linux-amd64/./mc &&
chmod +x mc
fi
set +e
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ls minioadm/
./mc admin config set minioadm/ api root_access=off
sleep 3s # let things settle a little
./mc ls minioadm/
if [ $? -eq 0 ]; then
echo "listing succeeded, 'minioadmin' was not disabled"
exit 1
fi
set -e
killall -9 minio
export MINIO_API_ROOT_ACCESS=on
for ((i = 0; i < $((nr_servers)); i++)); do
(minio server --address ":$((9100 + i))" $args 2>&1 >/tmp/log$i.txt) &
done
set +e
./mc ls minioadm/
if [ $? -ne 0 ]; then
echo "listing failed, 'minioadmin' should be enabled"
exit 1
fi
killall -9 minio
rm -rf /tmp/multisitea/
rm -rf /tmp/multisiteb/
echo "Setup site-replication and then disable root credentials"
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://minioadmin:[email protected]:9001
export MC_HOST_siteb=http://minioadmin:[email protected]:9004
./mc admin replicate add sitea siteb
./mc admin user add sitea foobar foo12345
./mc admin policy attach sitea/ consoleAdmin --user=foobar
./mc admin user info siteb foobar
killall -9 minio
echo "turning off root access, however site replication must continue"
export MINIO_API_ROOT_ACCESS=off
minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 &
minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 &
minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 &
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://foobar:[email protected]:9001
export MC_HOST_siteb=http://foobar:[email protected]:9004
./mc admin user add sitea foobar-admin foo12345
sleep 2s
./mc admin user info siteb foobar-admin
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"errors"
"fmt"
"io"
"os"
"github.com/minio/madmin-go/v3/estream"
)
func extractInspectV2(pk []byte, r io.Reader, w io.Writer) error {
privKey, err := bytesToPrivateKey(pk)
if err != nil {
return fmt.Errorf("decoding key returned: %w", err)
}
sr, err := estream.NewReader(r)
if err != nil {
return err
}
sr.SetPrivateKey(privKey)
sr.ReturnNonDecryptable(true)
// Debug corrupted streams.
if false {
sr.SkipEncrypted(true)
return sr.DebugStream(os.Stdout)
}
for {
stream, err := sr.NextStream()
if err != nil {
if err == io.EOF {
return errors.New("no data found on stream")
}
if errors.Is(err, estream.ErrNoKey) {
if stream.Name == "inspect.zip" {
return errors.New("incorrect private key")
}
if err := stream.Skip(); err != nil {
return fmt.Errorf("stream skip: %w", err)
}
continue
}
return fmt.Errorf("next stream: %w", err)
}
if stream.Name == "inspect.zip" {
_, err := io.Copy(w, stream)
if err != nil {
return fmt.Errorf("reading inspect stream: %w", err)
}
return nil
}
if err := stream.Skip(); err != nil {
return fmt.Errorf("stream skip: %w", err)
}
}
}
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
iampolicy "github.com/minio/pkg/iam/policy"
)
var (
errRebalanceDecommissionAlreadyRunning = errors.New("Rebalance cannot be started, decommission is already in progress")
errDecommissionRebalanceAlreadyRunning = errors.New("Decommission cannot be started, rebalance is already in progress")
)
func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StartDecommission")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
z, ok := objectAPI.(*erasureServerPools)
if !ok || len(z.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if z.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errDecommissionAlreadyRunning), r.URL)
return
}
if z.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
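// The pool argument may contain multiple comma-separated pools.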
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
idx := globalEndpoints.GetPoolIdx(pool)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
var pool *erasureSets
for pidx := range z.serverPools {
if pidx == idx {
pool = z.serverPools[idx]
break
}
}
if pool == nil {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
poolIndices = append(poolIndices, idx)
}
if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
ep := globalEndpoints[poolIndices[0]].Endpoints[0]
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CancelDecommission")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
idx := globalEndpoints.GetPoolIdx(v)
if idx == -1 {
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
return
}
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "StatusPool")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
v := vars["pool"]
idx := globalEndpoints.GetPoolIdx(v)
if idx == -1 {
apiErr := toAdminAPIErr(ctx, errInvalidArgument)
apiErr.Description = fmt.Sprintf("specified pool '%s' not found, please specify a valid pool", v)
// We didn't find any matching pools, invalid input
writeErrorResponseJSON(ctx, w, apiErr, r.URL)
return
}
status, err := pools.Status(r.Context(), idx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListPools")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction, iampolicy.DecommissionAdminAction)
if objectAPI == nil {
return
}
// Legacy args style such as non-ellipses style is not supported with this API.
if globalEndpoints.Legacy() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
poolsStatus := make([]PoolStatus, len(globalEndpoints))
for idx := range globalEndpoints {
status, err := pools.Status(r.Context(), idx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
poolsStatus[idx] = status
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RebalanceStart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// NB rebalance-start admin API is always coordinated from first pool's
// first node. The following is required to serialize (the effects of)
// concurrent rebalance-start commands.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok || len(pools.serverPools) == 1 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if pools.IsDecommissionRunning() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errRebalanceDecommissionAlreadyRunning), r.URL)
return
}
if pools.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
bucketInfos, err := objectAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buckets := make([]string, 0, len(bucketInfos))
for _, bInfo := range bucketInfos {
buckets = append(buckets, bInfo.Name)
}
var id string
if id, err = pools.initRebalanceMeta(ctx, buckets); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Rebalance routine is run on the first node of any pool participating in rebalance.
pools.StartRebalance()
b, err := json.Marshal(struct {
ID string `json:"id"`
}{ID: id})
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, b)
// Notify peers to load rebalance.bin and start rebalance routine if they happen to be
// participating pool's leader node
globalNotificationSys.LoadRebalanceMeta(ctx, true)
}
func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RebalanceStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
if objectAPI == nil {
return
}
// Proxy rebalance-status to first pool first node, so that users see a
// consistent view of rebalance progress even though different rebalancing
// pools may temporarily have out of date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
}
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
rs, err := rebalanceStatus(ctx, pools)
if err != nil {
if errors.Is(err, errRebalanceNotStarted) || errors.Is(err, errConfigNotFound) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RebalanceStop")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.RebalanceAdminAction)
if objectAPI == nil {
return
}
pools, ok := objectAPI.(*erasureServerPools)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lambda
import "github.com/minio/minio/internal/event/target"
// Config - lambda target configuration structure, holds
// information about various lambda targets.
type Config struct {
Webhook map[string]target.WebhookArgs `json:"webhook"`
}
const (
defaultTarget = "1"
)
// NewConfig - initialize lambda config.
func NewConfig() Config {
// Make sure to initialize lambda targets
cfg := Config{
Webhook: make(map[string]target.WebhookArgs),
}
cfg.Webhook[defaultTarget] = target.WebhookArgs{}
return cfg
}
<file_sep># Identity Management Plugin Guide [](https://slack.minio.io)
## Introduction
To enable the integration of custom authentication methods, MinIO can be configured with an Identity Management Plugin webhook. When configured, this plugin enables the `AssumeRoleWithCustomToken` STS API extension. A user or application can then present a token to the `AssumeRoleWithCustomToken` API, and MinIO verifies this token by sending it to the Identity Management Plugin webhook. The plugin responds with the user identity, validity duration and claims, and MinIO uses these to generate temporary STS credentials for interacting with object storage.
The authentication flow is similar to that of OpenID; however, the token is "opaque" to MinIO - it is simply sent to the plugin for verification. CAVEAT: there is no console UI integration for this method of authentication; it is intended primarily for machine authentication.
It can be configured via MinIO's standard configuration API (i.e. using `mc admin config set/get`), or equivalently with environment variables. For brevity we show only environment variables here:
```sh
$ mc admin config set myminio identity_plugin --env
KEY:
identity_plugin enable Identity Plugin via external hook
ARGS:
MINIO_IDENTITY_PLUGIN_URL* (url) plugin hook endpoint (HTTP(S)) e.g. "http://localhost:8181/path/to/endpoint"
MINIO_IDENTITY_PLUGIN_AUTH_TOKEN (string) authorization token for plugin hook endpoint
MINIO_IDENTITY_PLUGIN_ROLE_POLICY* (string) policies to apply for plugin authorized users
MINIO_IDENTITY_PLUGIN_ROLE_ID (string) unique ID to generate the ARN
MINIO_IDENTITY_PLUGIN_COMMENT (sentence) optionally add a comment to this setting
```
If provided, the auth token parameter is sent as an authorization header.
`MINIO_IDENTITY_PLUGIN_ROLE_POLICY` is a required parameter and can be a list of comma-separated policy names.
On setting up the plugin, the MinIO server prints the Role ARN to its log. The Role ARN is generated by default based on the given plugin URL. To avoid this and use a configurable value, set a unique role ID via `MINIO_IDENTITY_PLUGIN_ROLE_ID`.
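For example, a minimal environment-variable setup might look like the following; the endpoint URL, policy name and role ID below are placeholders, not defaults:
```sh
export MINIO_IDENTITY_PLUGIN_URL="http://localhost:8181/path/to/endpoint"
export MINIO_IDENTITY_PLUGIN_ROLE_POLICY="readwrite"
export MINIO_IDENTITY_PLUGIN_ROLE_ID="external-auth-1"
```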
## REST API call to plugin
To verify the custom token presented in the `AssumeRoleWithCustomToken` API, MinIO makes a POST request to the configured identity management plugin endpoint and expects a response with some details as shown below:
### Request `POST` to plugin endpoint
Query parameters:
| Parameter Name | Value Type | Purpose |
|----------------|------------|-------------------------------------------------------------------------|
| token | string | Token from the AssumeRoleWithCustomToken call for external verification |
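For illustration, an equivalent verification request could be made with curl; the endpoint is the example value from above, `<custom-token>` and `<auth-token>` are placeholders, and the Authorization header is only sent when an auth token is configured:
```sh
curl -X POST -H "Authorization: <auth-token>" \
  "http://localhost:8181/path/to/endpoint?token=<custom-token>"
```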
### Response
If the token is valid and access is approved, the plugin must return a `200` (OK) HTTP status code.
A `200 OK` response should have an `application/json` content-type and a body with the following structure:
```json
{
"user": <string>,
"maxValiditySeconds": <integer>,
"claims": <key-value-pairs>
}
```
| Parameter Name | Value Type | Purpose |
|--------------------|-----------------------------------------|--------------------------------------------------------|
| user | string | Identifier for owner of requested credentials |
| maxValiditySeconds | integer (>= 900 seconds and < 365 days) | Maximum allowed expiry duration for the credentials |
| claims | key-value pairs | Claims to be associated with the requested credentials |
The keys "exp", "parent" and "sub" in the `claims` object are reserved and if present are ignored by MinIO.
If the token is not valid or access is not approved, the plugin must return a `403` (forbidden) HTTP status code. The body must have an `application/json` content-type with the following structure:
```json
{
"reason": <string>
}
```
The reason message is returned to the client.
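To make the request/response contract above concrete, here is a minimal, illustrative handler sketch in Go; it is not the bundled example, and the listen address, route, token check and claim values are placeholders:
```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	// The custom token arrives as the "token" query parameter.
	token := r.URL.Query().Get("token")
	if token != "valid-token" { // placeholder verification logic
		w.WriteHeader(http.StatusForbidden)
		json.NewEncoder(w).Encode(map[string]string{"reason": "token rejected"})
		return
	}
	// Approved: return user, maximum validity and claims for the credentials.
	json.NewEncoder(w).Encode(map[string]interface{}{
		"user":               "alice", // placeholder identity
		"maxValiditySeconds": 3600,
		"claims":             map[string]string{"group": "readers"},
	})
}

func main() {
	http.HandleFunc("/path/to/endpoint", handler)
	log.Fatal(http.ListenAndServe(":8181", nil))
}
```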
## Example Plugin Implementation
A toy example for the Identity Management Plugin is given [here](./identity-manager-plugin.go).
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package callhome
import "github.com/minio/minio/internal/config"
var (
defaultHelpPostfix = func(key string) string {
return config.DefaultHelpPostfix(DefaultKVS, key)
}
// HelpCallhome - provides help for callhome config
HelpCallhome = config.HelpKVS{
config.HelpKV{
Key: Enable,
Type: "on|off",
Description: "set to enable callhome" + defaultHelpPostfix(Enable),
Optional: true,
},
config.HelpKV{
Key: Frequency,
Type: "duration",
Description: "time duration between callhome cycles e.g. 24h" + defaultHelpPostfix(Frequency),
Optional: true,
},
}
)
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package amztime
import (
"strings"
"time"
)
// RFC3339 is a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
const (
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
iso8601TimeFormatLong = "2006-01-02T15:04:05.000000Z" // Reply date format with microsecond precision.
)
// ISO8601Format converts time 't' into ISO8601 time format expected in AWS S3 spec.
//
// This function is needed to avoid Go's float64 precision bug, where Go avoids
// padding the extra '0' before the timezone.
func ISO8601Format(t time.Time) string {
value := t.Format(iso8601TimeFormat)
if len(value) < len(iso8601TimeFormat) {
value = t.Format(iso8601TimeFormat[:len(iso8601TimeFormat)-1])
// Pad necessary zeroes to fulfill the iso8601TimeFormat
return value + strings.Repeat("0", (len(iso8601TimeFormat)-1)-len(value)) + "Z"
}
return value
}
// ISO8601Parse parses ISO8601 date string
func ISO8601Parse(iso8601 string) (t time.Time, err error) {
for _, layout := range []string{
iso8601TimeFormat,
iso8601TimeFormatLong,
time.RFC3339,
} {
t, err = time.Parse(layout, iso8601)
if err == nil {
return t, nil
}
}
return t, err
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
// Object Lambda headers
const (
AmzRequestRoute = "x-amz-request-route"
AmzRequestToken = "x-amz-request-token"
AmzFwdStatus = "x-amz-fwd-status"
AmzFwdErrorCode = "x-amz-fwd-error-code"
AmzFwdErrorMessage = "x-amz-fwd-error-message"
AmzFwdHeaderAcceptRanges = "x-amz-fwd-header-accept-ranges"
AmzFwdHeaderCacheControl = "x-amz-fwd-header-Cache-Control"
AmzFwdHeaderContentDisposition = "x-amz-fwd-header-Content-Disposition"
AmzFwdHeaderContentEncoding = "x-amz-fwd-header-Content-Encoding"
AmzFwdHeaderContentLanguage = "x-amz-fwd-header-Content-Language"
AmzFwdHeaderContentRange = "x-amz-fwd-header-Content-Range"
AmzFwdHeaderContentType = "x-amz-fwd-header-Content-Type"
AmzFwdHeaderChecksumCrc32 = "x-amz-fwd-header-x-amz-checksum-crc32"
AmzFwdHeaderChecksumCrc32c = "x-amz-fwd-header-x-amz-checksum-crc32c"
AmzFwdHeaderChecksumSha1 = "x-amz-fwd-header-x-amz-checksum-sha1"
AmzFwdHeaderChecksumSha256 = "x-amz-fwd-header-x-amz-checksum-sha256"
AmzFwdHeaderDeleteMarker = "x-amz-fwd-header-x-amz-delete-marker"
AmzFwdHeaderETag = "x-amz-fwd-header-ETag"
AmzFwdHeaderExpires = "x-amz-fwd-header-Expires"
AmzFwdHeaderExpiration = "x-amz-fwd-header-x-amz-expiration"
AmzFwdHeaderLastModified = "x-amz-fwd-header-Last-Modified"
AmzFwdHeaderObjectLockMode = "x-amz-fwd-header-x-amz-object-lock-mode"
AmzFwdHeaderObjectLockLegalHold = "x-amz-fwd-header-x-amz-object-lock-legal-hold"
AmzFwdHeaderObjectLockRetainUntil = "x-amz-fwd-header-x-amz-object-lock-retain-until-date"
AmzFwdHeaderMPPartsCount = "x-amz-fwd-header-x-amz-mp-parts-count"
AmzFwdHeaderReplicationStatus = "x-amz-fwd-header-x-amz-replication-status"
AmzFwdHeaderSSE = "x-amz-fwd-header-x-amz-server-side-encryption"
AmzFwdHeaderSSEC = "x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm"
AmzFwdHeaderSSEKMSID = "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id"
AmzFwdHeaderSSECMD5 = "x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5"
AmzFwdHeaderStorageClass = "x-amz-fwd-header-x-amz-storage-class"
AmzFwdHeaderTaggingCount = "x-amz-fwd-header-x-amz-tagging-count"
AmzFwdHeaderVersionID = "x-amz-fwd-header-x-amz-version-id"
)
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zip"
"github.com/minio/kes-go"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/lifecycle"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
bucketTargetsFile = "bucket-targets.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the quota configuration will be applied by default
// to enforce total quota for the specified bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
quotaConfig, err := parseBucketQuota(bucket, data)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
bucketMeta := madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig,
Bucket: bucket,
Quota: data,
UpdatedAt: updatedAt,
}
if quotaConfig.Quota == 0 {
bucketMeta.Quota = nil
}
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
// SetRemoteTargetHandler - sets a remote target for bucket
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
update := r.Form.Get("update") == "true"
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
cred, _, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var target madmin.BucketTarget
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(reqBytes, &target); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
if sameTarget && bucket == target.TargetBucket {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
return
}
target.SourceBucket = bucket
var ops []madmin.TargetUpdateType
if update {
ops = madmin.GetTargetUpdateOps(r.Form)
} else {
var exists bool // true if arn exists
target.Arn, exists = globalBucketTargetSys.getRemoteARN(bucket, &target, "")
if exists && target.Arn != "" { // return pre-existing ARN
data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
return
}
}
if target.Arn == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if globalSiteReplicationSys.isEnabled() && !update {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetDenyAddError, err), r.URL)
return
}
if update {
// overlay the updates on existing target
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, target.Arn)
if tgt.Empty() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetNotFoundError, err), r.URL)
return
}
for _, op := range ops {
switch op {
case madmin.CredentialsUpdateType:
if !globalSiteReplicationSys.isEnabled() {
// credentials update is possible only in bucket replication. User will never
// know the site replicator creds.
tgt.Credentials = target.Credentials
tgt.TargetBucket = target.TargetBucket
tgt.Secure = target.Secure
tgt.Endpoint = target.Endpoint
}
case madmin.SyncUpdateType:
tgt.ReplicationSync = target.ReplicationSync
case madmin.ProxyUpdateType:
tgt.DisableProxy = target.DisableProxy
case madmin.PathUpdateType:
tgt.Path = target.Path
case madmin.BandwidthLimitUpdateType:
tgt.BandwidthLimit = target.BandwidthLimit
case madmin.HealthCheckDurationUpdateType:
tgt.HealthCheckDuration = target.HealthCheckDuration
}
}
target = tgt
}
// enforce minimum bandwidth limit as 100MBps
if target.BandwidthLimit > 0 && target.BandwidthLimit < 100*1000*1000 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationBandwidthLimitError, err), r.URL)
return
}
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
switch err.(type) {
case RemoteTargetConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := json.Marshal(target.Arn)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
// for a particular ARN type
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketTargets")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
arnType := vars["type"]
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction)
if objectAPI == nil {
return
}
if bucket != "" {
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
data, err := json.Marshal(targets)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, data)
}
// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveBucketTarget")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := pathClean(vars["bucket"])
arn := vars["arn"]
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction)
if objectAPI == nil {
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tgtBytes, err := json.Marshal(&targets)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessNoContent(w)
}
// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file
func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ExportBucketMetadata")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
bucket := pathClean(r.Form.Get("bucket"))
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction)
if objectAPI == nil {
return
}
var (
buckets []BucketInfo
err error
)
if bucket != "" {
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buckets = append(buckets, BucketInfo{Name: bucket})
} else {
buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// Initialize a zip writer which will provide a zipped content
// of bucket metadata
zipWriter := zip.NewWriter(w)
defer zipWriter.Close()
rawDataFn := func(r io.Reader, filename string, sz int) error {
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: int64(sz),
mode: 0o600,
modTime: time.Now(),
isDir: false,
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
}
return nil
}
cfgFiles := []string{
bucketPolicyConfig,
bucketNotificationConfig,
bucketLifecycleConfig,
bucketSSEConfig,
bucketTaggingConfig,
bucketQuotaConfigFile,
objectLockConfig,
bucketVersioningConfig,
bucketReplicationConfig,
bucketTargetsFile,
}
for _, bi := range buckets {
for _, cfgFile := range cfgFiles {
cfgPath := pathJoin(bi.Name, cfgFile)
bucket := bi.Name
switch cfgFile {
case bucketNotificationConfig:
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketLifecycleConfig:
config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
continue
}
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketQuotaConfigFile:
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
if err != nil {
if errors.Is(err, BucketQuotaConfigNotFound{Bucket: bucket}) {
continue
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketSSEConfig:
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
if errors.Is(err, BucketSSEConfigNotFound{Bucket: bucket}) {
continue
}
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketTaggingConfig:
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
if errors.Is(err, BucketTaggingNotFound{Bucket: bucket}) {
continue
}
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case objectLockConfig:
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucket}) {
continue
}
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketVersioningConfig:
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
// ignore empty versioning configs
if config.Status != versioning.Enabled && config.Status != versioning.Suspended {
continue
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketReplicationConfig:
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucket}) {
continue
}
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
case bucketTargetsFile:
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
if err != nil {
if errors.Is(err, BucketRemoteTargetNotFound{Bucket: bucket}) {
continue
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
}
}
}
}
type importMetaReport struct {
madmin.BucketMetaImportErrs
}
func (i *importMetaReport) SetStatus(bucket, fname string, err error) {
st := i.Buckets[bucket]
var errMsg string
if err != nil {
errMsg = err.Error()
}
switch fname {
case bucketPolicyConfig:
st.Policy = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketNotificationConfig:
st.Notification = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketLifecycleConfig:
st.Lifecycle = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketSSEConfig:
st.SSEConfig = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketTaggingConfig:
st.Tagging = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketQuotaConfigFile:
st.Quota = madmin.MetaStatus{IsSet: true, Err: errMsg}
case objectLockConfig:
st.ObjectLock = madmin.MetaStatus{IsSet: true, Err: errMsg}
case bucketVersioningConfig:
st.Versioning = madmin.MetaStatus{IsSet: true, Err: errMsg}
default:
st.Err = errMsg
}
i.Buckets[bucket] = st
}
// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config
// There are some caveats regarding the following:
// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here.
// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons.
// 3. lifecycle config - if transition rules are present, tier name needs to have been defined.
func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ImportBucketMetadata")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction)
if objectAPI == nil {
return
}
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
reader := bytes.NewReader(data)
zr, err := zip.NewReader(reader, int64(len(data)))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
bucketMap := make(map[string]struct{}, 1)
rpt := importMetaReport{
madmin.BucketMetaImportErrs{
Buckets: make(map[string]madmin.BucketStatus, len(zr.File)),
},
}
// import object lock config if any - order of import matters here.
for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
continue
}
bucket, fileName := slc[0], slc[1]
if fileName == objectLockConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
config, err := objectlock.ParseObjectLockConfig(reader)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
}
configData, err := xml.Marshal(config)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
if _, ok := bucketMap[bucket]; !ok {
opts := MakeBucketOptions{
LockEnabled: config.Enabled(),
}
err = objectAPI.MakeBucket(ctx, bucket, opts)
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
bucketMap[bucket] = struct{}{}
}
// Deny object locking configuration settings on existing buckets without object lock enabled.
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
}
// import versioning metadata
for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
continue
}
bucket, fileName := slc[0], slc[1]
if fileName == bucketVersioningConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
if _, ok := bucketMap[bucket]; !ok {
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{}); err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
bucketMap[bucket] = struct{}{}
}
if globalSiteReplicationSys.isEnabled() && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("Cluster replication is enabled for this site, so the versioning state cannot be suspended."))
continue
}
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
configData, err := xml.Marshal(v)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
}
}
for _, file := range zr.File {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(file.Name, "", err)
continue
}
sz := file.FileInfo().Size()
slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
continue
}
bucket, fileName := slc[0], slc[1]
// create bucket if it does not exist yet.
if _, ok := bucketMap[bucket]; !ok {
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{})
if err != nil {
if _, ok := err.(BucketExists); !ok {
rpt.SetStatus(bucket, "", err)
continue
}
}
bucketMap[bucket] = struct{}{}
}
if _, ok := bucketMap[bucket]; !ok {
continue
}
switch fileName {
case bucketNotificationConfig:
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
}
configData, err := xml.Marshal(config)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rulesMap := config.ToRulesMap()
globalEventNotifier.AddRulesMap(bucket, rulesMap)
rpt.SetStatus(bucket, fileName, nil)
case bucketPolicyConfig:
// Error out if Content-Length is beyond allowed size.
if sz > maxBucketPolicySize {
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
continue
}
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Version in policy must not be empty
if bucketPolicy.Version == "" {
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
continue
}
configData, err := json.Marshal(bucketPolicy)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: bucketPolicyBytes,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketLifecycleConfig:
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Validate the transition storage ARNs
if err = validateTransitionTier(bucketLifecycle); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
configData, err := xml.Marshal(bucketLifecycle)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
case bucketSSEConfig:
// Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize))
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
}
// Return error if KMS is not initialized
if GlobalKMS == nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s", errorCodes[ErrKMSNotConfigured].Description))
continue
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
continue
}
rpt.SetStatus(bucket, fileName, err)
continue
}
}
configData, err := xml.Marshal(encConfig)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Store the bucket encryption configuration in the object layer
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketTaggingConfig:
tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
}
configData, err := xml.Marshal(tags)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook.
//
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
UpdatedAt: updatedAt,
}); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
case bucketQuotaConfigFile:
data, err := io.ReadAll(reader)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
quotaConfig, err := parseBucketQuota(bucket, data)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
rpt.SetStatus(bucket, fileName, nil)
bucketMeta := madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig,
Bucket: bucket,
Quota: data,
UpdatedAt: updatedAt,
}
if quotaConfig.Quota == 0 {
bucketMeta.Quota = nil
}
// Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
}
}
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, rptData)
}
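// exampleBucketMetadataZip is an illustrative sketch (not part of the upstream
// handlers): it assembles an import payload in the layout that
// ImportBucketMetadataHandler expects, i.e. one "<bucket>/<config file>" entry
// per configuration inside the zip archive.
func exampleBucketMetadataZip(bucket string, lifecycleXML []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	// Each entry name must be exactly two path components: bucket/configfile.
	w, err := zw.Create(pathJoin(bucket, bucketLifecycleConfig))
	if err != nil {
		return nil, err
	}
	if _, err = w.Write(lifecycleXML); err != nil {
		return nil, err
	}
	if err = zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}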
// ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN
// to the connected HTTP client.
func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ReplicationDiff")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ReplicationDiff)
if objectAPI == nil {
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
opts := extractReplicateDiffOpts(r.Form)
if opts.ARN != "" {
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, opts.ARN)
if tgt.Empty() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, fmt.Errorf("invalid arn : '%s'", opts.ARN)), r.URL)
return
}
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
diffCh, err := getReplicationDiff(ctx, objectAPI, bucket, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
enc := json.NewEncoder(w)
for {
select {
case entry, ok := <-diffCh:
if !ok {
return
}
if err := enc.Encode(entry); err != nil {
return
}
if len(diffCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
}
case <-keepAliveTicker.C:
if len(diffCh) > 0 {
continue
}
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-ctx.Done():
return
}
}
}
<file_sep>// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"sync"
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/pubsub"
)
const bootstrapMsgsLimit = 4 << 10
type bootstrapInfo struct {
msg string
ts time.Time
source string
}
type bootstrapTracer struct {
mu sync.RWMutex
idx int
info [bootstrapMsgsLimit]bootstrapInfo
lastUpdate time.Time
}
var globalBootstrapTracer = &bootstrapTracer{}
func (bs *bootstrapTracer) DropEvents() {
bs.mu.Lock()
defer bs.mu.Unlock()
if time.Now().UTC().Sub(bs.lastUpdate) > 24*time.Hour {
bs.info = [bootstrapMsgsLimit]bootstrapInfo{}
bs.idx = 0
}
}
func (bs *bootstrapTracer) Empty() bool {
var empty bool
bs.mu.RLock()
empty = bs.info[0].msg == ""
bs.mu.RUnlock()
return empty
}
func (bs *bootstrapTracer) Record(msg string, skip int) {
source := getSource(skip + 1)
bs.mu.Lock()
now := time.Now().UTC()
bs.info[bs.idx] = bootstrapInfo{
msg: msg,
ts: now,
source: source,
}
bs.lastUpdate = now
bs.idx = (bs.idx + 1) % bootstrapMsgsLimit
bs.mu.Unlock()
}
func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
traceInfo := make([]madmin.TraceInfo, 0, bootstrapMsgsLimit)
// Add all messages in order
addAll := func(info []bootstrapInfo) {
for _, msg := range info {
if msg.ts.IsZero() {
continue // skip empty events
}
traceInfo = append(traceInfo, madmin.TraceInfo{
TraceType: madmin.TraceBootstrap,
Time: msg.ts,
NodeName: globalLocalNodeName,
FuncName: "BOOTSTRAP",
Message: fmt.Sprintf("%s %s", msg.source, msg.msg),
})
}
}
bs.mu.RLock()
addAll(bs.info[bs.idx:])
addAll(bs.info[:bs.idx])
bs.mu.RUnlock()
return traceInfo
}
func (bs *bootstrapTracer) Publish(ctx context.Context, trace *pubsub.PubSub[madmin.TraceInfo, madmin.TraceType]) {
if bs.Empty() {
return
}
for _, bsEvent := range bs.Events() {
select {
case <-ctx.Done():
default:
trace.Publish(bsEvent)
}
}
}
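// exampleBootstrapTrace is an illustrative sketch and not part of the upstream
// code: it shows how the fixed-size ring buffer is meant to be used. Record
// appends an entry (wrapping after bootstrapMsgsLimit messages) and Events
// replays the recorded entries in insertion order as admin trace records.
func exampleBootstrapTrace() []madmin.TraceInfo {
	globalBootstrapTracer.Record("erasure sets initialized", 1)
	globalBootstrapTracer.Record("IAM sub-system loaded", 1)
	return globalBootstrapTracer.Events()
}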
<file_sep>// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
)
// TestGetMissingSiteNames
func TestGetMissingSiteNames(t *testing.T) {
testCases := []struct {
currSites []madmin.PeerInfo
oldDepIDs set.StringSet
newDepIDs set.StringSet
expNames []string
}{
// Test1: missing some sites in replicated setup
{
[]madmin.PeerInfo{
{Endpoint: "minio1:9000", Name: "minio1", DeploymentID: "dep1"},
{Endpoint: "minio2:9000", Name: "minio2", DeploymentID: "dep2"},
{Endpoint: "minio3:9000", Name: "minio3", DeploymentID: "dep3"},
},
set.CreateStringSet("dep1", "dep2", "dep3"),
set.CreateStringSet("dep1"),
[]string{"minio2", "minio3"},
},
// Test2: new site added that is not in replicated setup
{
[]madmin.PeerInfo{{Endpoint: "minio1:9000", Name: "minio1", DeploymentID: "dep1"}, {Endpoint: "minio2:9000", Name: "minio2", DeploymentID: "dep2"}, {Endpoint: "minio3:9000", Name: "minio3", DeploymentID: "dep3"}},
set.CreateStringSet("dep1", "dep2", "dep3"),
set.CreateStringSet("dep1", "dep2", "dep3", "dep4"),
[]string{},
},
// Test3: not currently under site replication.
{
[]madmin.PeerInfo{},
set.CreateStringSet(),
set.CreateStringSet("dep1", "dep2", "dep3", "dep4"),
[]string{},
},
}
for i, tc := range testCases {
names := getMissingSiteNames(tc.oldDepIDs, tc.newDepIDs, tc.currSites)
if len(names) != len(tc.expNames) {
t.Errorf("Test %d: Expected `%v`, got `%v`", i+1, tc.expNames, names)
}
}
}
<file_sep>//go:build linux
// +build linux
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"context"
"net"
"syscall"
"time"
"golang.org/x/sys/unix"
)
func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall.RawConn) error {
return func(network, address string, c syscall.RawConn) error {
c.Control(func(fdPtr uintptr) {
// got socket file descriptor to set parameters.
fd := int(fdPtr)
_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
// Enable TCP fast open
// https://lwn.net/Articles/508865/ - 16k queue size.
_ = syscall.SetsockoptInt(fd, syscall.SOL_TCP, unix.TCP_FASTOPEN, 16*1024)
// Enable TCP fast connect
// TCPFastOpenConnect sets the underlying socket to use
// the TCP fast open connect. This feature is supported
// since Linux 4.11.
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_FASTOPEN_CONNECT, 1)
// Enable TCP quick ACK, <NAME> says
// "Set TCP_QUICKACK. If you find a case where that makes things worse, let me know."
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_QUICKACK, 1)
// The time (in seconds) the connection needs to remain idle before
// TCP starts sending keepalive probes
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 15)
// Number of probes.
// ~ cat /proc/sys/net/ipv4/tcp_keepalive_probes (defaults to 9, we reduce it to 5)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 5)
// Wait time after successful probe in seconds.
// ~ cat /proc/sys/net/ipv4/tcp_keepalive_intvl (defaults to 75 secs, we reduce it to 15 secs)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 15)
// Set tcp user timeout in addition to the keep-alive - tcp-keepalive is not enough to close a socket
// with dead end because tcp-keepalive is not fired when there is data in the socket buffer.
// https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/
// This is a sensitive configuration, it is better to set it to high values, > 60 secs since it can
// affect clients reading data with a very slow pace (disproportionate with socket buffer sizes)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, opts.UserTimeout)
if opts.Interface != "" {
if h, _, err := net.SplitHostPort(address); err == nil {
address = h
}
// Create socket on specific vrf device.
// To catch all kinds of special cases this filters specifically for loopback networks.
if ip := net.ParseIP(address); ip != nil && !ip.IsLoopback() {
_ = syscall.SetsockoptString(fd, syscall.SOL_SOCKET, syscall.SO_BINDTODEVICE, opts.Interface)
}
}
})
return nil
}
}
// DialContext is a function to make custom Dial for internode communications
type DialContext func(ctx context.Context, network, address string) (net.Conn, error)
// NewInternodeDialContext sets up a custom dialer for internode communication
func NewInternodeDialContext(dialTimeout time.Duration, opts TCPOptions) DialContext {
return func(ctx context.Context, network, addr string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: dialTimeout,
Control: setTCPParametersFn(opts),
}
return dialer.DialContext(ctx, network, addr)
}
}
// NewCustomDialContext sets up a custom dialer for any external communication and proxies.
func NewCustomDialContext(dialTimeout time.Duration, opts TCPOptions) DialContext {
return func(ctx context.Context, network, addr string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: dialTimeout,
Control: setTCPParametersFn(opts),
}
return dialer.DialContext(ctx, network, addr)
}
}
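// exampleDialUsage is an illustrative sketch, not part of the upstream code.
// It shows how the custom dialer is wired up; the 10s dial timeout and the
// 60000ms TCP user timeout used here are example values, not recommended defaults.
func exampleDialUsage(ctx context.Context) (net.Conn, error) {
	dial := NewCustomDialContext(10*time.Second, TCPOptions{UserTimeout: 60000})
	return dial(ctx, "tcp", "example.com:443")
}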
<file_sep>// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mcontext
// Shares common context information between different
// packages in github.com/minio/minio.
import (
xhttp "github.com/minio/minio/internal/http"
)
// ContextTraceType represents the type of golang Context key
type ContextTraceType string
// ContextTraceKey is the key of TraceCtxt saved in a Golang context
const ContextTraceKey = ContextTraceType("ctx-trace-info")
// TraceCtxt holds related tracing data of an HTTP request.
type TraceCtxt struct {
RequestRecorder *xhttp.RequestRecorder
ResponseRecorder *xhttp.ResponseRecorder
FuncName string
AmzReqID string
}
<file_sep># Access Management Plugin Guide [](https://slack.minio.io)
MinIO now includes support for using an Access Management Plugin. This is to allow object storage access control to be managed externally via a webhook.
When configured, MinIO sends request and credential details for every API call to an external HTTP(S) endpoint and expects an allow/deny response. MinIO is thus able to delegate access management to an external system, and users are able to use a custom solution instead of S3 standard IAM policies.
Latency-sensitive applications may notice increased latency, since every authenticated request to MinIO triggers a call to the external plugin. Users are advised to provision their infrastructure so that the added latency and its performance impact are acceptable.
## Quickstart
To easily try out the feature, run the included demo Access Management Plugin program in this directory:
```sh
go run access-manager-plugin.go
```
This program lets the admin user perform any action and prevents all other users from performing `s3:Put*` operations.
In another terminal start MinIO:
```sh
export MINIO_CI_CD=1
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_POLICY_PLUGIN_URL=http://localhost:8080/
minio server /tmp/disk{1...4}
```
Now, let's test it out with `mc`:
```sh
mc alias set myminio http://localhost:9000 minio minio123
mc ls myminio
mc mb myminio/test
mc cp /etc/issue myminio/test
mc admin user add myminio foo foobar123
export MC_HOST_foo=http://foo:foobar123@localhost:9000
mc ls foo
mc cp /etc/issue myminio/test/issue2
```
Only the last operation would fail with a permissions error.
## Configuration
Access Management Plugin can be configured with environment variables:
```sh
$ mc admin config set myminio policy_plugin --env
KEY:
policy_plugin enable Access Management Plugin for policy enforcement
ARGS:
MINIO_POLICY_PLUGIN_URL* (url) plugin hook endpoint (HTTP(S)) e.g. "http://localhost:8181/v1/data/httpapi/authz/allow"
MINIO_POLICY_PLUGIN_AUTH_TOKEN (string) authorization header for plugin hook endpoint
MINIO_POLICY_PLUGIN_ENABLE_HTTP2 (bool) Enable experimental HTTP2 support to connect to plugin service (default: 'off')
MINIO_POLICY_PLUGIN_COMMENT (sentence) optionally add a comment to this setting
```
By default this plugin uses HTTP 1.x. To enable HTTP2 use the `MINIO_POLICY_PLUGIN_ENABLE_HTTP2` environment variable.
## Request and Response
MinIO will make a `POST` request with a JSON body to the given plugin URL. If the auth token parameter is set, it will be sent as an authorization header.
The JSON body structure can be seen from this sample:
<details><summary>Request Body Sample</summary>
```json
{
"input": {
"account": "minio",
"groups": null,
"action": "s3:ListBucket",
"bucket": "test",
"conditions": {
"Authorization": [
"AWS4-HMAC-SHA256 Credential=minio/20220507/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=62012db6c47d697620cf6c68f0f45f6e34894589a53ab1faf6dc94338468c78a"
],
"CurrentTime": [
"2022-05-07T18:31:41Z"
],
"Delimiter": [
"/"
],
"EpochTime": [
"1651948301"
],
"Prefix": [
""
],
"Referer": [
""
],
"SecureTransport": [
"false"
],
"SourceIp": [
"127.0.0.1"
],
"User-Agent": [
"MinIO (linux; amd64) minio-go/v7.0.24 mc/DEVELOPMENT.2022-04-20T23-07-53Z"
],
"UserAgent": [
"MinIO (linux; amd64) minio-go/v7.0.24 mc/DEVELOPMENT.2022-04-20T23-07-53Z"
],
"X-Amz-Content-Sha256": [
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"X-Amz-Date": [
"20220507T183141Z"
],
"authType": [
"REST-HEADER"
],
"principaltype": [
"Account"
],
"signatureversion": [
"AWS4-HMAC-SHA256"
],
"userid": [
"minio"
],
"username": [
"minio"
],
"versionid": [
""
]
},
"owner": true,
"object": "",
"claims": {},
"denyOnly": false
}
}
```
</details>
The response expected by MinIO is a JSON body with a boolean:
```json
{
"result": true
}
```
The following structure is also accepted:
```json
{
"result": {
"allow": true
}
}
```
Any unmentioned JSON object keys in the above are ignored.
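For reference, a webhook that satisfies this contract can be only a few lines of Go. The sketch below is illustrative only — it is *not* the bundled `access-manager-plugin.go` — and the hypothetical `authRequest` struct picks out just the `account` and `action` fields from the request sample above; the `:8080` address and the hard-coded `"minio"` root account are assumptions matching this quickstart.
```go
// Minimal illustrative policy webhook (assumed port 8080, matching
// MINIO_POLICY_PLUGIN_URL above): allow everything for the root account,
// deny s3:Put* for everyone else.
package main

import (
	"encoding/json"
	"net/http"
	"strings"
)

type authRequest struct {
	Input struct {
		Account string `json:"account"`
		Action  string `json:"action"`
	} `json:"input"`
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var req authRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		allow := req.Input.Account == "minio" || !strings.HasPrefix(req.Input.Action, "s3:Put")
		json.NewEncoder(w).Encode(map[string]bool{"result": allow})
	})
	http.ListenAndServe(":8080", nil)
}
```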
d8e998417adac2a7afc7ca16cb3ab7243ddadefc | ["Markdown", "Go", "Go Module", "Dockerfile", "Shell"] | 215 | Go | codeworld-GitHub/minio | d2f5c3621ff5e8428e592f7029ba0e18cc9b3bcc | b6453d2a1fec92ebc47e1c41d2d732750e21dbff | refs/heads/master
<repo_name>auburner/gradle-homework<file_sep>/build.gradle
/*
* This build file was auto generated by running the Gradle 'init' task
* by 'DanWilkerson' at '9/12/16 11:08 AM' with Gradle 3.0
*
* This generated file contains a sample Java project to get you started.
* For more details take a look at the Java Quickstart chapter in the Gradle
* user guide available at https://docs.gradle.org/3.0/userguide/tutorial_java_projects.html
*/
// Apply the java plugin to add support for Java
apply plugin: 'java'
// In this section you declare where to find the dependencies of your project
repositories {
// Use 'jcenter' for resolving your dependencies.
// You can declare any Maven/Ivy/file repository here.
jcenter()
}
// In this section you declare the dependencies for your production and test code
dependencies {
// The production code uses the SLF4J logging API at compile time
compile 'org.slf4j:slf4j-api:1.7.21'
// Declare the dependency for your favourite test framework you want to use in your tests.
// TestNG is also supported by the Gradle Test task. Just change the
// testCompile dependency to testCompile 'org.testng:testng:6.8.1' and add
// 'test.useTestNG()' to your build script.
testCompile 'junit:junit:4.12'
}
task gradleToTheGrave {
doLast {
println '\nGradle to the grave\n'
}
task lyrics(dependsOn: gradleToTheGrave) << {
println "The blood that runs within my veins (the blood that runs within my veins)\
\nKeeps me from ever ending up the same (ending up the same)\
\n\
\nThe fire that's pushing me on and on and on (on and on and on)\
\nTo me it's everything and it makes me..."
}
}
<file_sep>/src/main/java/Library.java
/*
* This Java source file was auto generated by running 'gradle buildInit --type java-library'
* by 'DanWilkerson' at '9/12/16 11:08 AM' with Gradle 3.0
*
* @author DanWilkerson, @date 9/12/16 11:08 AM
*/
public class Library {
public boolean someLibraryMethod() {
return true;
}
}
5805b28ea96880ec59b826af4191a852be2f3e3a | ["Java", "Gradle"] | 2 | Gradle | auburner/gradle-homework | 63815aa4dfe64444ce877ec0770e3d96cd353334 | 50e4a5f7ea5721e00c6ff45a4ada0b5dcfa9ba78 | refs/heads/main
<repo_name>ChochoJose/WebLenguajeMarcas<file_sep>/JS/proyecto.js
var inicio = 0;
carrusel();
function carrusel(){
var i;
var x = document.getElementsByClassName("fotosGal");
for (var i = 0; i < x.length ; i++) {
x[i].style.display = "none";
}
inicio++;
if (inicio > x.length) {
inicio = 1;
}
x[inicio - 1].style.display = "flex";
setTimeout(carrusel,1800);
}
function desplegar(){
// assign the menu element to the "menu" variable
var menu = document.getElementById("menu");
// classList returns the element's classes; toggle() adds or removes the "desplegado" style class
menu.classList.toggle("desplegado");
var gal = document.getElementById("galeria");
gal.classList.toggle("esconder");
}
// script that validates the user on login
function validar(){
// store the values of the input boxes
var mail, password, expresion;
mail = document.getElementById("mail").value;
password = document.getElementById("password").value;
// expected structure of an e-mail address: <EMAIL>, es, net
expresion = /\w+@\w+\.+[a-z]/;
/*********************************************/
if (mail === "" || password === "") {
alert("Todos los campos deben ser rellenados.");
return false;
} else if(mail.length > 50){
alert("El nรบmero de caracteres introducidos supera el lรญmite permitido: 50");
return false;
}else if(!expresion.test(mail)){
alert("Correo no vรกlido, por favor introduzca un valor correcto");
return false;
}else if(password.length > 8){
alert("El nรบmero de caracteres introducidos supera el lรญmite");
return false;
}
}
// script that validates the user on registration
function registro(){
// store the values of the input boxes
var usuario, mail, password, rpassword, expresion;
usuario = document.getElementById("usuario").value;
mail = document.getElementById("mail").value;
password = document.getElementById("password").value;
rpassword = document.getElementById("rpassword").value;
// expected structure of an e-mail address: <EMAIL>, es, net
expresion = /\w+@\w+\.+[a-z]/;
/*********************************************/
if (usuario === "" || mail === "" || password === "" || rpassword === "") {
alert("Todos los campos deben ser rellenados.");
return false;
}else if(usuario.length > 50){
alert("El nรบmero de caracteres introducidos para el nombre del usuario supera el lรญmite permitido: 50");
return false;
} else if(mail.length > 50){
alert("El nรบmero de caracteres introducidos para el mail supera el lรญmite permitido: 50");
return false;
}else if(!expresion.test(mail)){
alert("Correo no vรกlido, por favor introduzca un valor correcto");
return false;
}else if(password.length > 8){
alert("El nรบmero de caracteres introducidos supera el lรญmite");
return false;
}else if(rpassword.length > 8){
alert("El nรบmero de caracteres introducidos supera el lรญmite");
return false;
}
if(password != rpassword){
alert("Las contraseรฑas introducidas no coinciden.");
return false;
}else{
alert(usuario + "Muchas gracias por registrarse con su correo "+correo+" en nuestra pรกgina web. En breve recivirรก una notificacion de que el proceso ha ido correctamente.");
return true;
}
}<file_sep>/Apple.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" type="text/css" href="ProyectoWeb.css">
<link rel="icon" type="imagen/ico" href="IMG/LogoWeb.jpg">
<script type="text/javascript" src="JS/proyecto.js"></script>
<title>Emporiotech</title>
</head>
<body>
<div class="principal">
<div class="header">
<div class="logo">
<img src="IMG/LogoWeb.png" id="LogoIMG">
</div>
<div class="nombre">
<h1 class="nombreEmp" align="center">EMPORIOTECH</h1>
</div>
</div>
<div class="cuerpo">
<a href="#" class="abrir" onclick="desplegar()">
<span class="linea"></span>
<span class="linea"></span>
<span class="linea"></span>
</a>
<div class="menu" id="menu">
<a href="#" class="cerrar" onclick="desplegar()">x</a>
<ul class="nav">
<li><a href="index.html">Inicio</a></li>
<li><a href="#">Servicios</a>
<ul>
<li><a href="mantenimiento.html">Mantenimiento</a></li>
<li><a href="gestionSistemas.html">Gestiรณn de Sistemas</a></li>
<li><a href="desProyectos.html">Desarrollo de Proyectos</a></li>
</ul>
</li>
<li><a href="#">Productos</a>
<ul>
<li><a href="Perifericos.html">Perifรฉricos</a></li>
<li><a href="#">Marcas Asociadas ></a>
<ul>
<li><a href="Apple.html">Apple</a></li>
<li><a href="MSI.html">MSI</a></li>
</ul>
</li>
</ul>
</li>
<li><a href="Instalaciones.html">Proyectos</a></li>
<li><a href="QuienesSomos.html">Quienes Somos</a></li>
<li><a href="Contacto.html">Contacto</a></li>
<li><a href="Cliente.html">Area de Clientes</a></li>
<li><a href="Noticias.html">Blog de Noticias</a></li>
</ul></div>
<div class="contenido">
<div class="centro">
<p>
<h2 class="intro">DISTRIBUIDOR AUTORIZADO APPLE.</h2>
<hr color="#DCDBDE">
<br>
<h3 class="desc">Contamos con un convenio directo con la empresa apple ya que nuestra cede en Estados Unidos esta adyacente a el nuevo apple park, contamos tanto con un amplio inventario de productos, como con todo tipo de servicio tรฉcnico que puedas necesitar, somos de los รบnicos centros en todo Madrid que cuenta con inventario dentro de tienda .</h3>
<h2 class="intro">Productos y Servicio Tรฉcnico</h2>
<h3 class="desc">Agendando tu cita podras comprar toda la gama de productos apple desde telรฉfonos, hasta ordenadores de escritorio, ademรกs podrรกs resolver tus inquietudes sobre alguna reparaciรณn o repuesto por grantรญa que necesites
</h3>
<br>
<div class="divgalerias">
<div class="contenedor-galeria">
<img src="IMG/iphone.jpg" class="fotosGal">
<img src="IMG/macbook.jpg" class="fotosGal">
<img src="IMG/appcare.png" class="fotosGal">
</div>
<script type="text/javascript">
var inicio = 0;
carrusel();
</script>
</div>
</p>
</div>
<div class="lateral">
<h2 class="intro">AGENDA UNA CITA CON NOSOTROS.</h2>
<hr color="#DCDBDE">
<br>
<form action="mailto:<EMAIL>" method="POST" name="DAM">
<p>
<label for="nombre" name="nombre">Nombre:</label>
<input type="text" name="nombre" required=""r>
</p>
<br>
<p>
<label for="datetime">Fecha y Hora:</label>
<input type="datetime-local" name="datetime">
</p>
<br>
<label for="comentarios">Razรณn de tu cita:</label>
<br>
<textarea name="comentarios" cols="27" rows="10"></textarea>
<br>
<p align="center">
<button type="submit" class="boton">Enviar</button>
<button type="reset" class="boton">Borrar</button>
</p>
</form>
<br>
<p align="center">
<img src="IMG/gracias.jpg" height="70px" width="140px">
</p>
</div>
</div>
</div>
<div class="footer">
<p align="center">Grupo Emporiotechยฎ Derechos de autor pagina Web: <NAME>, <NAME>, <NAME>. ยฉ</p>
</div>
</div>
</body>
</html>
<file_sep>/proyecto.js
var inicio = 0;
carrusel();
function carrusel(){
var i;
var x = document.getElementsByClassName("fotosGal");
for (var i = 0; i < x.length ; i++) {
x[i].style.display = "none";
}
inicio++;
if (inicio > x.length) {
inicio = 1;
}
x[inicio - 1].style.display = "flex";
setTimeout(carrusel,1800);
}
function validar(){
var mail, password, expresion;
mail = document.getElementById("mail").value;
password = document.getElementById("password").value;
expresion = /\w+@\w+\.+[a-z]/;
if (mail === "" || password === "") {
alert ("ESTOS CAMPOS SON OBLIGATORIOS");
return false;
}else if(mail.length > 30){
alert("SUPERA EL LIMITE DE CARACTERES");
return false;
}else if(!expresion.test(mail)){
alert("CORREO ELECTRONICO NO VALIDO");
return false;
}else if (password.length > 8) {
alert("CONTRASEÑA INVÁLIDA");
return false;
}
}
|
e15dea34380d1b001320d5c4c9384fa5c0520551
|
[
"JavaScript",
"HTML"
] | 3 |
JavaScript
|
ChochoJose/WebLenguajeMarcas
|
e85269a28c49a9dfeadb2daede7b0f0d8d0e9664
|
ce89e174e7209f656711656503ba4bee6ff113aa
|
refs/heads/master
|
<repo_name>DevDataAnalyst/hoichoi<file_sep>/KKBOB_churn.R
# general visualisation
library('ggplot2') # visualisation
library('scales') # visualisation
library('grid') # visualisation
library('gridExtra') # visualisation
library('RColorBrewer') # visualisation
library('corrplot') # visualisation
library(plyr)
library(corrplot)
library(ggplot2)
library(gridExtra)
library(ggthemes)
library(caret)
library(MASS)
library(randomForest)
library(party)
# general data manipulation
library('dplyr') # data manipulation
library('readr') # input/output
library('data.table') # data manipulation
library('tibble') # data wrangling
library('tidyr') # data wrangling
library('stringr') # string manipulation
library('forcats') # factor manipulation
# Dates
library('lubridate') # date and time
# Extra vis
library('ggforce') # visualisation
# Define multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
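# A minimal usage sketch for multiplot() (kept commented out so that sourcing
# this script behaves exactly as before; p1/p2/p3 are hypothetical plots, not
# objects defined elsewhere in this file):
# p1 <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
# p2 <- ggplot(mtcars, aes(factor(cyl))) + geom_bar()
# p3 <- ggplot(mtcars, aes(hp)) + geom_histogram(bins = 10)
# # plot 1 goes top-left, plot 2 top-right, plot 3 spans the bottom row
# multiplot(p1, p2, p3, layout = matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE))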
train <- as.tibble(fread('C:/Personal/Interview and Recruitment/SVF/KKBOX/KKBOXR/data/train.csv'))
#test <- as.tibble(fread('C:/Personal/Interview and Recruitment/SVF/KKBOX/KKBOXR/data/sample_submission.csv'))
members <- as.tibble(fread('C:/Personal/Interview and Recruitment/SVF/KKBOX/KKBOXR/data/members.csv', nrows = 1e6))
trans <- as.tibble(fread('C:/Personal/Interview and Recruitment/SVF/KKBOX/KKBOXR/data/transactions.csv', nrows = 1e6))
logs <- as.tibble(fread('C:/Personal/Interview and Recruitment/SVF/KKBOX/KKBOXR/data/user_logs.csv', nrows = 5e6))
str(train)
str(logs)
str(members)
str(trans)
## join tibbles
t2 = inner_join(logs,members, by = "msno")
t3 = inner_join(t2,trans, by = "msno")
t4 = inner_join(t3,train, by = "msno")
### dates
df <- transform(t4, date = as.Date(as.character(date), "%Y%m%d"))
df <- transform(df, registration_init_time = as.Date(as.character(registration_init_time), "%Y%m%d"))
df <- transform(df, transaction_date = as.Date(as.character(transaction_date), "%Y%m%d"))
df <- transform(df, membership_expire_date = as.Date(as.character(membership_expire_date), "%Y%m%d"))
df = distinct(df, msno, .keep_all = TRUE) # distinct users
df = df[sample(nrow(df), 1000), ]
##### LOGIT
set.seed(1234) # seed must be set before the random split for reproducibility
intrain<- createDataPartition(df$is_churn,p=0.7,list=FALSE)
training<- df[intrain,]
testing<- df[-intrain,]
dim(training); dim(testing)
# LogModel <- glm(training$is_churn ~ .,family=binomial(link="logit"),data=training)
# print(summary(LogModel))
#
# # XGBoost
#
# library(xgboost)
# library(readr)
# library(stringr)
# library(caret)
# library(car)
# library(Matrix)
#
#
# sparse_matrix <- sparse.model.matrix(response ~ .-1, data = df)
|
fba7bf507a77239b53d1515a15584390da164bab
|
[
"R"
] | 1 |
R
|
DevDataAnalyst/hoichoi
|
218fb3a62db6e5ddbfecde45dab85496f63df482
|
40d45eaca7b6b6b8228e8511334bb19327994f8d
|
refs/heads/master
|
<file_sep>package br.com.flaviotvrs.tutorial.dataprovider;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import br.com.flaviotvrs.tutorial.dataprovider.mapper.BookMapper;
import br.com.flaviotvrs.tutorial.dataprovider.repository.BookRepository;
import br.com.flaviotvrs.tutorial.usecase.entity.Book;
import br.com.flaviotvrs.tutorial.usecase.gateway.BookFindByAuthorGateway;
@Component
class BookFindByAuthorDataProvider implements BookFindByAuthorGateway {
private BookRepository repository;
@Autowired
public BookFindByAuthorDataProvider(BookRepository repository) {
this.repository = repository;
}
@Override
public List<Book> findByAuthor(Integer authorId) {
return BookMapper.toBusinessEntity(repository.findByAuthorId(authorId));
}
}
<file_sep>package br.com.flaviotvrs.tutorial.usecase.gateway;
import br.com.flaviotvrs.tutorial.usecase.entity.Book;
public interface BookFindByIdGateway {
Book findById(String bookId);
}
<file_sep>package br.com.flaviotvrs.tutorial.dataprovider.mapper;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import br.com.flaviotvrs.tutorial.dataprovider.repository.entity.BookEntity;
import br.com.flaviotvrs.tutorial.usecase.entity.Book;
public class BookMapper {
public static Book toBusinessEntity(Optional<BookEntity> entity) {
Book businessEntity = null;
if (entity.isPresent()) {
BookEntity bookEntity = entity.get();
businessEntity = Book.builder().id(bookEntity.getId()).name(bookEntity.getName())
.pageCount(bookEntity.getPageCount()).authorId(bookEntity.getAuthorId())
.published(bookEntity.getPublished()).isbn10(bookEntity.getIsbn10()).isbn13(bookEntity.getIsbn13())
.build();
}
return businessEntity;
}
public static List<Book> toBusinessEntity(List<BookEntity> list) {
return list.stream().map(entity -> toBusinessEntity(Optional.ofNullable(entity))).collect(Collectors.toList());
}
}
<file_sep>database=mysql
spring.jpa.database-platform=org.hibernate.dialect.MySQL5InnoDBDialect
spring.jpa.show-sql=true
spring.datasource.driverClassName=com.mysql.cj.jdbc.Driver
spring.datasource.url=jdbc:mysql://localhost/graphql_demo_db?useTimezone=true&serverTimezone=UTC
spring.datasource.username=gqluser
spring.datasource.password=<PASSWORD>
spring.datasource.platform=<file_sep>package br.com.flaviotvrs.tutorial.dataprovider.repository;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import br.com.flaviotvrs.tutorial.dataprovider.repository.entity.BookEntity;
public interface BookRepository extends JpaRepository<BookEntity, String> {
List<BookEntity> findByNameIgnoreCaseContaining(String bookName);
List<BookEntity> findByAuthorId(Integer authorId);
}
<file_sep>package br.com.flaviotvrs.tutorial.dataprovider;
import java.util.List;
import java.util.stream.Collectors;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.stereotype.Component;
import br.com.flaviotvrs.tutorial.dataprovider.mapper.BookMapper;
import br.com.flaviotvrs.tutorial.dataprovider.repository.BookRepository;
import br.com.flaviotvrs.tutorial.dataprovider.repository.entity.BookEntity;
import br.com.flaviotvrs.tutorial.usecase.entity.Book;
import br.com.flaviotvrs.tutorial.usecase.gateway.BookFindAllGateway;
@Component
class BookFindAllDataProvider implements BookFindAllGateway {
private BookRepository repository;
@Autowired
public BookFindAllDataProvider(BookRepository repository) {
this.repository = repository;
}
@Override
public List<Book> findAll(int page, int size) {
Page<BookEntity> findAll = repository.findAll(PageRequest.of(page, size));
return BookMapper.toBusinessEntity(findAll.get().collect(Collectors.toList()));
}
}
<file_sep>server.port=8080
server.compression.enabled=true
server.compression.mime-types=application/json
server.compression.min-response-size=1024
graphql.servlet.mapping=/graphql
graphql.servlet.enabled=true
graphql.servlet.corsEnabled=true
spring.datasource.url=jdbc:h2:mem:testdb
spring.datasource.username=sa
spring.datasource.password=
spring.datasource.platform=h2sandbox
spring.datasource.sql-script-encoding=UTF-8
spring.jpa.database-platform=org.hibernate.dialect.H2Dialect
spring.jpa.show-sql=true
spring.jpa.hibernate.ddl-auto=update
spring.h2.console.enabled=true
spring.h2.console.path=/h2-console
logging.level.org.hibernate.SQL=INFO
logging.level.org.hibernate.type.descriptor.sql.BasicBinder=INFO
logging.level.br.com.flaviotvrs=INFO<file_sep># GraphQL Tutorial Implementation
This is an implementation of [GraphQL Tutorial for Java](https://www.graphql-java.com/tutorials/getting-started-with-spring-boot/) using [Clean Architecture principles](https://medium.freecodecamp.org/a-quick-introduction-to-clean-architecture-990c014448d2). This application also gives an example of how to use GraphQL with data stored in a database.
The purpose of this project is to learn the GraphQL technology: how to develop with it and how to test it.
## Getting Started
### Run Locally
```sh
$ mvn spring-boot:run
```
Application will be available at http://localhost:8080/graphql .
### Sample Query
The sample query below searches for books whose name contains the word "Tower" and returns each matching book together with its author information:
```sh
curl \
-X POST \
-H "Content-Type: application/json" \
--data '{ "query" : "{ bookByName(name: \"Tower\"){ id name pageCount author { firstName lastName } } }" }' \
http://localhost:8080/graphql
```
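A successful call returns a standard GraphQL JSON envelope. The response below only illustrates its shape — the field values are made up, and whether `bookByName` returns a single object or a list depends on the definition in `schema.graphqls`:
```json
{
  "data": {
    "bookByName": [
      {
        "id": "book-1",
        "name": "An Example Tower Book",
        "pageCount": 223,
        "author": { "firstName": "Jane", "lastName": "Doe" }
      }
    ]
  }
}
```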
### Going further
You can explore all of the data exposed by this application by looking at [this file](src/main/resources/schema.graphqls); a rough sketch of its shape is shown below.
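The sketch that follows is only an approximation of that schema, inferred from the gateways and entities in this repository (`BookFindByAuthorGateway`, `BookFindAllGateway`, `BookEntity`, and so on); the real file may use different query names, arguments, or nullability:
```graphql
type Query {
  bookByName(name: String!): [Book]
  bookByAuthor(authorId: Int!): [Book]
  bookById(id: ID!): Book
  allBooks(page: Int!, size: Int!): [Book]
}

type Book {
  id: ID!
  name: String!
  pageCount: Int
  published: String
  isbn10: String
  isbn13: String
  author: Author
}

type Author {
  id: ID!
  firstName: String
  lastName: String
}
```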
## Built With
* [Maven](https://maven.apache.org/) - Dependency Management.
* [Spring-Boot](https://spring.io/projects/spring-boot) - Spring Boot makes it easy to create stand-alone, production-grade Spring based Applications that you can "just run".
* [GraphQL](https://graphql.org/) - GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data.
* [H2 Database](https://www.h2database.com) - In-memory database.<file_sep>package br.com.flaviotvrs.tutorial.dataprovider.repository.entity;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Entity
@Table(name = "BOOK")
@Data
@AllArgsConstructor
@NoArgsConstructor
@Builder
public class BookEntity {
@Id
@Column(name = "ID", nullable = false, length = 50)
private String id;
@Column(name = "NAME", nullable = false, length = 200)
private String name;
@Column(name = "PAGE_COUNT", nullable = false, precision = 9, scale = 0)
private Integer pageCount;
@Column(name = "AUTHOR_ID", nullable = false, length = 50)
private Integer authorId;
@Temporal(TemporalType.DATE)
@Column(name = "PUBLISHED", nullable = false)
private Date published;
@Column(name = "ISBN_10", nullable = false, length = 10)
private String isbn10;
@Column(name = "ISBN_13", nullable = false, length = 13)
private String isbn13;
}
|
696824c0d9185ddd75843cfffd7d4886eaaced5c
|
[
"Markdown",
"Java",
"INI"
] | 9 |
Java
|
flaviotvrs/graphql-demo-project
|
1530558df0f5448d0901d481c916be818256f58b
|
06059a2078405d8a2cb0f11f86ec57ebc7e7435a
|
refs/heads/master
|
<file_sep>//create section's svg
function Section(svgSelector) {
// var $this = this;
// // common svg
// $this.svgCommon = $(document).find('.' + svgClass);
// $this.sectionFlats = $this.svgCommon.find('g[id*="rooms"] > *');
// $this.sectionPorch = $this.svgCommon.find('g[id*="porch"] > *');
// $this.sectionPorch = $this.svgCommon.find('g[id*="porch"] > *');
// common svg
this.svgCommon = $(document).find(svgSelector);
// all flats
this.getSectionsFlats = function() {
return this.svgCommon.find('g[id*="rooms"] > *');
};
this.getSectionsPorch = function() {
return this.svgCommon.find('g[id*="porch"] > *');
};
// all sections
this.getSectionsSpace = function() {
return this.svgCommon.find('g[id*="sections"] > *');
};
// number of sections
this.getSectionQnt = function() {
return this.getSectionsSpace().length;
};
// build the html for the sections
this.createSectionHtml = function(flats) {
var sections = [];
var self = this;
var sectionsSpaces = this.getSectionsSpace();
// build the array of flats for the whole floor
var sectionsFlats = this.getSectionsFlats();
var flats = [];
sectionsFlats.each(function() {
//flats.unshift($(this).clone());
flats.push($(this).clone()); //flat-floor
});
// number of flats per section
var sectionFlatsCount = flats.length / this.getSectionQnt();
sectionsSpaces.each(function() {
var sectionData = {
space: self.setSectionSpace($(this).clone()), // html
flats: self.setSectionFlats(flats, sectionFlatsCount), //html
porch: self.getSectionsPorch().length ? self.setSectionPorch($(this).attr('id').split('sect-')[1]) : '' //html
};
var svgCommonParams = self.svgCommon[0].getAttribute('viewBox').split(' ');
var width = (svgCommonParams[2] / self.getSectionQnt()).toFixed(2);
var height = svgCommonParams[3];
var porch = '';
if(sectionData.porch != '') {
porch = '<g id="big--section-'+ self.SectionSpaceNumber +'--porch--' + self.svgCommon.attr('id') + '">' +
sectionData.porch +
'</g>'
}
var section = '<svg id="big--section-'+ self.SectionSpaceNumber +'--' + self.svgCommon.attr('id') + '"' +
'viewBox="0 0 ' + width + ' ' + height + '">' +
'<g id="big--section-'+ self.SectionSpaceNumber +'--sections--' + self.svgCommon.attr('id') + '">' +
sectionData.space +
'</g>' +
porch +
'<g id="big--section-'+ self.SectionSpaceNumber +'--rooms--' + self.svgCommon.attr('id') + '">' +
sectionData.flats +
'</g>' +
'</svg>';
sections.unshift(section)
});
return sections;
};
// handle each section's area element
// move the section element to the start of the svg
this.setSectionSpace = function(that) {
this.SectionSpaceNumber = that.attr('id').split('sect-')[1];
var firstSpace = this.getSectionsSpace();
// different sections may use different tags for the space element
if (that.attr('points')) {
that.attr('points', firstSpace.attr('points'));
} else if (that.attr('x')) {
that.attr('x', firstSpace.attr('x'))
}
that.attr('id', 'big--' + that.attr('id'));
return that[0].outerHTML;
};
// handle each section's flats
// move the flats to the start of the section
this.setSectionFlats = function(flats, sectionFlatsCount) {
// build the array of flats for a single section
var sectionFlats = [];
for (var i = 0; i < sectionFlatsCount; i++) {
// sectionFlats.unshift(flats.pop());
sectionFlats.push(flats.shift());
}
//console.log(this.getSectionsFlats()[0]);
var flatsHtml = '';
for(var j = 0; j < sectionFlats.length; j++ ) {
if (sectionFlats[j].attr('points')) {
sectionFlats[j].attr('points', $(this.getSectionsFlats()[j]).attr('points'));
} else if (sectionFlats[j].attr('x')) {
sectionFlats[j].attr('x', $(this.getSectionsFlats()[j]).attr('x'));
}
sectionFlats[j].attr('id', 'big--' + sectionFlats[j].attr('id'));
// console.log(sectionFlats[j]);
flatsHtml += sectionFlats[j][0].outerHTML;
}
//console.log(flatsHtml);
return flatsHtml;
};
this.setSectionPorch = function(number) {
var porches = this.getSectionsPorch();
//console.log('porches', porches);
var porch = '';
porches.each(function() {
if($(this).attr('id').indexOf('porch-' + number) != -1)
//number[0], because the section id is malformed
porch = $(this).clone();
});
porch.attr('id', 'big--' + porch.attr('id'));
// different sections may use different tags for the space element
if (porch.attr('points')) {
porch.attr('points', porches.attr('points'));
} else if (porch.attr('x')) {
porch.attr('x', porches.attr('x'))
}
return porch[0].outerHTML;
};
}
<file_sep>/* ------------------------------- */
//create section svg
/* ------------------------------- */
function addSvgToSlider(sectionsSVG) {
var self = this;
self.sectionCarousel = $('.flat-sector__slider');
if ($(document).find('.flat-sector__map-big ').length) {
self.sectionCarousel.slick('unslick');
self.sectionCarousel.html('');
}
$.each(sectionsSVG, function(i, el) {
self.sectionCarousel.prepend('<div class="flat-sector__map-big">' + el + '</div>');
// starting with the first section
//self.sectionCarousel.append('<div class="flat-sector__map-big">' + el + '</div>');
if (i == sectionsSVG.length - 1) {
self.createSectionSlider()
}
});
}
function createSectionSlider() {
this.sectionCarousel.slick({
infinite: false,
prevArrow: '<button type="button" class="flat-sector__nav flat-sector__nav_type_left button-reset">' +
'<i class="fa fa-angle-left fa-4x" aria-hidden="true"></i>' +
'</button>',
nextArrow: '<button type="button" class="flat-sector__nav flat-sector__nav_type_right button-reset">' +
'<i class="fa fa-angle-right fa-4x" aria-hidden="true"></i>' +
'</button>'
});
setTimeout(function() {
$(".flat-sector__preloader").css('display', 'none');
}, 400);
flatSlider.goBackActiveFlat();
}
/* ------------------------------- */
//flat slider
/* ------------------------------- */
$('.flat-sector__slider').on('init afterChange', function (event, slick) {
flatSlider.markActiveSection();
flatSlider.flatsFloor();
if (!$('#control').attr('data-flat')) {
$($('.flat-sector__number')[0]).addClass('active-flat__item');
}
});
$('.flat-sector__slider').on('beforeChange', function (event, slick) {
$('.flat-sector__number').remove();
});
var flatSlider = {
markActiveSection: function () {
var allFlats = document.querySelectorAll('[id *= rooms] *');
for (var j = 0; j < allFlats.length; j++) {
var flatC = allFlats[j].classList[0];
allFlats[j].setAttribute('class', flatC);
}
var section = document.querySelector('.flat-sector__slider .slick-active');
var flats = section.querySelectorAll('[id *= rooms] *');
var flatsId = [];
for (var i = 0; i < flats.length; i++) {
var flatId = flats[i].getAttribute('id').substr(5);
var flat = document.getElementById(flatId);
var flatClass = flat.getAttribute('class');
flat.setAttribute('class', flatClass + ' active-flat');
}
var sectionNumber = section.querySelector('[id *= sections--] > * ').getAttribute('id');
sectionNumber = sectionNumber.split('sect-')[1];
$('.flat-sector__map-section-number').text(sectionNumber);
},
flatsFloor: function () {
var flats = $('.flat-sector__map-big.slick-current').find('.st2');
var sectionCoord = $('.flat-sector__map-big.slick-current svg')[0].getBBox();
var sectionWidth = sectionCoord.width;
var sectionHeight = sectionCoord.height;
var minSvg = $('.flat-sector__map-min-svg-img').attr('id');
houseNumber = minSvg.substring(minSvg.indexOf('--h-') + 4, minSvg.indexOf('--f-'));
$('.flat-sector__house-number').html('Дом ' + houseNumber);
$.each(flats, function (index, el) {
flatId = $(el).attr('id');
flatNumber = flatId.substring(flatId.indexOf('--r-') + 4);
flatSquare = flatId.substring(flatId.indexOf('--h-') + 4, flatId.indexOf('--f-')) + index;
flatRooms = flatId.substring(flatId.indexOf('--f-') + 4, flatId.indexOf('--r-')) + index;
topNumber = $('.flat-sector__map-big.slick-current').find('#' + flatId).offset().top;
leftNumber = $('.flat-sector__map-big.slick-current').find('#' + flatId).offset().left;
// flatWidth = $('.flat-sector__map-big.slick-current').find('#' + flatId)[0].getBBox().width - 22;
// flatHeight = $('.flat-sector__map-big.slick-current').find('#' + flatId)[0].getBBox().height / 2;
//
// leftMap = $('.flat-sector__body').offset().left;
//
// topMap = $('.flat-sector__body').offset().top;
var flatCoord = el.getBBox(),
flatX = (flatCoord.x + ( flatCoord.width / 2)) * 100 / sectionWidth,
flatY = (flatCoord.y + ( flatCoord.height / 2)) * 100 / sectionHeight;
// if (index == 0) {
// $('.flat-sector__body').append('<div class="flat-sector__number" data-id=' + flatId + ' data-number=' + flatNumber + ' data-rooms=' + flatRooms + ' data-square=' + flatSquare + '>' + flatNumber + '</div>');
// } else {
// $('.flat-sector__body').append('<div class="flat-sector__number" data-id=' + flatId + ' data-number=' + flatNumber + ' data-rooms=' + flatRooms + ' data-square=' + flatSquare + '>' + flatNumber + '</div>');
// }
// $('.flat-sector__body').append('<div class="flat-sector__number" data-id=' + flatId + ' data-number=' + flatNumber + ' data-rooms=' + flatRooms + ' data-square=' + flatSquare + '>' + flatNumber + '</div>');
// $('.flat-sector__number[data-id=' + flatId + ']').css({ 'top': topNumber - topMap + flatHeight, 'left': leftNumber - leftMap + flatWidth });
if (index == 0) {
$('.flat-sector__body').append('<div class="flat-sector__number active-flatItem" data-id=' + flatId + ' data-number=' + flatNumber + ' data-rooms=' + flatRooms + ' data-square=' + flatSquare + '>' + flatNumber + '</div>');
$(el).attr('class', 'st2 active-flat');
} else {
$('.flat-sector__body').append('<div class="flat-sector__number" data-id=' + flatId + ' data-number=' + flatNumber + ' data-rooms=' + flatRooms + ' data-square=' + flatSquare + '>' + flatNumber + '</div>');
}
$('.flat-sector__number[data-id=' + flatId + ']').css({
'top': flatY + '%',
'left': flatX + '%'
});
});
// highlight the active flat when returning from its page
var div = $(document).find('#selected-flat');
var flatNumber = div.data('flat');
var selectedFlat = $(document).find('.flat-sector__map-big.slick-current *[id *= --r-' + flatNumber + ']');
if (flatNumber && selectedFlat.length) {
$(document).find('.flat-sector__number').removeClass('active-flatItem');
var firstFlat = $(document).find(".flat-sector__slider .active-flat");
var firstFlatClass = (firstFlat.attr('class') + '').split(' ')[0];
$(document).find(".flat-sector__slider .active-flat").attr('class', firstFlatClass);
var selectedFlatClass = (selectedFlat.attr('class') + '').split(" ");
selectedFlat.attr('class', selectedFlatClass + ' active-flat');
$(document).find('.flat-sector__number[data-number *= ' + flatNumber + ']')
.addClass('active-flatItem');
}
//goBackActiveFlat()
flatSlider.getFlatData();
flats.hover(function () {
var id = $(this).attr('id');
flats.attr('class', 'st2');
$(this).attr('class', 'st2 active-flat');
$('.flat-sector__number').removeClass('active-flatItem');
$('.flat-sector__number[data-id=' + id + ']').addClass('active-flatItem');
flatSlider.getFlatData()
});
flats.click(function () {
goToFlat();
});
$('.flat-sector__number').click(function () {
goToFlat();
});
},
getFlatData: function () {
var activeId = $('.flat-sector__number.active-flatItem').data('id');
var countRooms = $('.flat-info[data-flatId=' + activeId + ']').data('room');
var squareRoom = $('.flat-info[data-flatId=' + activeId + ']').data('square');
var roomNumber = $('.flat-info[data-flatId=' + activeId + ']').data('number')
$('.flat-info__rooms').html(countRooms + '-х комнатная');
$('.flat-info__square').html(squareRoom + ' м <sup>2</sup>');
$('.flat-sector__flat-number-text').html('№' + roomNumber);
},
goBackActiveFlat: function () {
var div = $(document).find('#selected-flat');
var floor = div.data('floor');
var flat = div.data('flat');
if (floor && flat) {
var slider = $('.flat-sector__slider');
var activeFlat = flat;
slider.find('.st2').attr('class', 'st2');
$.each(slider[0].slick.$slides, function (i, slide) {
if ($(slide).find('[id *= --r-'+ activeFlat +']').length) {
slider.slick('slickGoTo', i);
}
});
}
}
};
/* ------------------------------- */
//main svg
/* ------------------------------- */
$(document).on('click', '.flat-sector__map-min-svg-img g *', function () {
var id = $(this).attr('id');
var slider = $('.flat-sector__slider');
var slideIndex = '';
$.each(slider[0].slick.$slides, function (i, slide) {
if ($(slide).find('#big--' + id).length)
slideIndex = i
});
slider.slick('slickGoTo', slideIndex);
});<file_sep>var requireDir = require('require-dir');
var gulp = require('gulp');
var notify = require('gulp-notify');
var concatCss = require('gulp-concat-css');
var rename = require('gulp-rename');
var minifyCSS = require('gulp-minify-css');
var clean = require('gulp-clean');
var uglify = require('gulp-uglify');
var concat = require('gulp-concat');
var jade = require('jade');
var gulpJade = require('gulp-jade');
var pug = require('pug');
var gulpPug = require('gulp-pug');
var autoprefixer = require('gulp-autoprefixer');
var streamqueue = require('streamqueue');
var browserSync = require('browser-sync');
var babel = require('gulp-babel');
$ = require('gulp-load-plugins')();
var project = require('./project.conf');
gulp.task('default', function() {
return gulp.src('./').pipe(notify('\r\n' +
'build - compile project\r\n' +
'unBuild - remove project folder'));
});
gulp.task('unBuild', function() {
return gulp.src('public', { read: false })
.pipe(clean());
});
/**
 * Build the whole project
*/
gulp.task('build', ['templates', 'build-css', 'build-font', 'build-bower', 'build-js', 'build-img', 'gDir']);
gulp.task('run', ['templates', 'build-css', 'build-js', 'gDir', 'browser-sync', 'watch'])
gulp.task('templates', function() {
return gulp.src('./common/template/*.pug')
.pipe(gulpPug({
pretty: true
}))
.pipe(gulp.dest(project.build))
.pipe(browserSync.stream());
});
gulp.task('gDir', function() {
return gulp.src(['./common/template/*.*', '!./common/template/*.pug'])
.pipe(gulp.dest(project.build))
.pipe(browserSync.stream());
});
gulp.task('browser-sync', function() {
browserSync.init({
proxy: "complex/public"
});
});
/**
 * Build common/css of the project
*/
gulp.task('build-css', function() {
var buildCss = gulp.src('./common/css/**/*.css')
.pipe(autoprefixer({
browsers: ['last 15 versions', 'Explorer > 8'],
cascade: false
}))
.pipe($.concat('assets/css/style.css'))
.pipe(minifyCSS())
.pipe(rename('assets/css/main.min.css'))
.pipe(gulp.dest(project.build + '/'))
//.pipe(browserSync.stream());
return function() {
buildCss;
};
});
/**
 * Build common/js of the project
*/
// gulp.task('babel', function() {
// gulp.src('./common/js/class/sectionClass.js')
// .pipe(babel({
// presets: ["es2015"]
// }))
// .pipe(gulp.dest('./common/js/'))
// });
gulp.task('build-js', function() {
// var buildJs = gulp.src('./common/js/**/*.js')
/*
.pipe(concat('main.js'))
.pipe(uglify())
.pipe(rename('main.min.js'))
.pipe(gulp.dest(project.build + '/assets/js/'));
return function () {
buildJs;
};
*/
gulp.src(['./common/js/**/*.js', './common/js/*.js'])
.pipe(concat('main.js'))
//.pipe(uglify())
.pipe(rename('main.min.js'))
.pipe(gulp.dest(project.build + '/assets/js/'))
//.pipe(browserSync.stream());
});
/**
 * Build common/libs/js of the project
*/
gulp.task('build-bower', function() {
var buildBowerJs = gulp.src(project.src.vendorJs)
.pipe($.concat('vendor.js'))
.pipe(uglify())
.pipe(rename('vendor.min.js'))
.pipe(gulp.dest(project.build + '/assets/libs/js'));
var buildBowerCss = streamqueue({ objectMode: true },
gulp.src(project.src.vendorCss)
)
//.pipe($.newer(project.build + '/assets/libs/css/vendor.css'))
.pipe($.concat('vendor.css'))
.pipe(autoprefixer({
browsers: ['last 15 versions', 'Explorer > 8'],
cascade: false
}))
//.pipe(concatCss('assets/libs/css/vendor.css', {targetFile: './gg/gg/',inlineImports: false, rebaseUrls: false, includePaths: './erer/'}))
.pipe(minifyCSS())
.pipe(rename('vendor.min.css'))
.pipe(gulp.dest(project.build + '/assets/libs/css'));
var buildBowerCssRelated = {};
var settings = project.src.vendorCssRelated;
for (key in settings) {
buildBowerCssRelated[key] = gulp.src(settings[key].dir)
.pipe(gulp.dest(project.build + '/assets/libs/css' + settings[key].place));
}
return function() {
buildBowerJs;
buildBowerCss;
buildBowerCssRelated;
};
});
/**
 * Build common/img of the project
*/
gulp.task('build-img', function() {
var buildImg = gulp.src('./common/img/**/**')
.pipe(gulp.dest(project.build + '/assets/img/'))
.pipe(browserSync.stream());
return function() {
buildImg;
};
});
/**
 * Build common/fonts of the project
*/
/*
var settings = {
'opensans':{
src:'open-sans'
}
};*/
var settings = project.src.fonts;
gulp.task('build-font', function() {
var buildFontCss = {};
for (key in settings) {
buildFontCss[key] = gulp.src('./common/fonts/' + settings[key] + '/*.css')
.pipe($.concat('assets/fonts/' + settings[key] + '/stylesheet.css'))
.pipe(minifyCSS())
.pipe(rename('stylesheet.min.css'))
.pipe(gulp.dest(project.build + '/assets/fonts/' + settings[key] + '/'))
.pipe(browserSync.stream());
}
var buildFont = gulp.src('./common/fonts/**/**').pipe(gulp.dest(project.build + '/assets/fonts/')).pipe(browserSync.reload({ stream: true }));
return function() {
buildFont;
buildFontCss;
};
});
gulp.task('watch', function() {
gulp.watch('common/js/**/*.js', ['build-js']).on('change', browserSync.reload);
gulp.watch('common/template/**/*', ['templates']).on('change', browserSync.reload);
gulp.watch('common/fonts/**/*', ['build-font']).on('change', browserSync.reload);
gulp.watch('common/css/**/*', ['build-css']).on('change', browserSync.reload);
gulp.watch('common/img/**/*', ['build-img']).on('change', browserSync.reload);
});<file_sep>var houseFloors = '';
// hide all floor highlights
function markFloors() {
houseFloors = $(document).find('.house-img g[id *= floors] *');
houseFloors.each(function(el, i) {
var floorClass = $(this).attr('class');
$(this).attr('class', floorClass + ' floor-hide');
});
}
// label with the house address
function addHouseLbl () {
var houseChords = $('.house-img')[0].getBBox();
var address = $('#data-house').data('house');
var houseGroups = $('.house-img g[id *= floors]'),
top = houseGroups[0].getBBox().y,
right = houseGroups[0].getBBox().x + houseGroups[0].getBBox().width;
houseGroups.each(function() {
var item = $(this)[0];
// console.log($(this)[0].getBBox());
if(item.getBBox().y < top)
top = item.getBBox().y;
var thatRight = item.getBBox().x + item.getBBox().width;
if(thatRight > right)
right = thatRight;
});
top = top * 100 / houseChords.height;
top = top < 10 ? 2 : top;
right = 100 - (right * 100 / houseChords.width);
var houseLbl = '<div class="house-label"' +
'style="top:'+ top +'%;' +
'right: '+ right +'%;">' +
'<span class="house-label__txt">'+ address +'</span>' +
'<i class="house-label__ico fa fa-clock-o"></i>' +
'</div>';
$('.flats__body-inner').append(houseLbl);
}
$(document).on('mouseover', '.house-img g[id *= floors] *', function() {
$(this).attr('class', $(this).attr('class').split(' ').shift());
var houseChords = $('.house-img')[0].getBBox();
var floor = {
floorChords: new CalcChords($(this)[0].getBBox()),
number: $(this).attr('id').split('--f-')[1],
color: $(this).css('fill')
};
var label = '';
label += '<div class="floor-label" ';
label += 'style="background: ' + floor.color + ';';
label += 'top:' + floor.floorChords.calculate('y', houseChords.height) + '%;';
label += 'left:' + floor.floorChords.calculate('x', houseChords.width) + '%;"';
label += '>Этаж ' + floor.number + '</div>';
$('.flats__body-inner').append(label);
});
$(document).on('mouseout', '.house-img g[id *= floors] *', function() {
var floorClass = $(this).attr('class').split(' ')[0];
$(this).attr('class', floorClass + ' floor-hide');
$(document).find('.floor-label').addClass('label-hide');
});
$(document).on('click', '.house-img g[id *= floors] *', function() {
var floorNumber = $(this).attr('id').split('--f-').pop();
var href = $('.data-div[data-floor='+ floorNumber +']').attr('data-link');
location.href = href;
});
<file_sep>
var callableFunctions = {
'.flats__content' : function () {
callback();
},
'svg.flat-sector__map-min-svg-img': function () {
var sections = new Section('.flat-sector__map-min-svg-img');
addSvgToSlider(sections.createSectionHtml());
},
'.flat': function () {
highlightSelectedFlat();
},
'.house-img': function () {
markFloors();
addHouseLbl();
},
'.plan-block__img': function () {
createComerceObj();
}
};
var svg = {
obj: {
selector: '._svg'
},
run: function(data, successCallback) {
var selector = data.selector ? data.selector : svg.obj.selector;
// var hoverOn = typeof(arguments[0].hoverOn)=='boolean' ? arguments[0].hoverOn:this.obj.hoverOn;
// var hoverColor = arguments[0].hoverColor?arguments[0].hoverColor:this.obj.hoverColor;
var count = $(document).find(selector).length;
$(document).find(selector).each(function() {
var $img = $(this);
var imgID = $img.attr('id');
var imgClass = $img.attr('class');
var imgURL = $img.attr('src');
var imgTitle = $img.attr('title') ? $img.attr('title') : false;
var imgStyle = $img.attr('style') ? $img.attr('style') : false;
var dataWidth = $img.attr('data-width') == 1 ? $img.attr('data-width') : '0';
var imgWidth = false;
if (dataWidth == '1') {
imgWidth = $img.width();
}
$.get(imgURL, function(data) {
// Get the SVG tag, ignore the rest
var $svg = jQuery(data).find('svg');
// Add replaced image's ID to the new SVG
if (typeof imgID !== 'undefined') {
$svg = $svg.attr('id', imgID);
}
// Add replaced image's classes to the new SVG
if (typeof imgClass !== 'undefined') {
$svg = $svg.attr('class', imgClass + ' replaced-svg');
}
if (imgTitle) {
$svg.find('title').html(imgTitle)
}
if (imgWidth) {
imgStyle = imgStyle ? imgStyle : '';
imgStyle = imgStyle + '; width:' + imgWidth + 'px';
}
if (imgStyle) {
$svg.attr('style', imgStyle)
}
// Remove any invalid XML tags as per http://validator.w3.org
$svg = $svg.removeAttr('xmlns:a');
// Replace image with new SVG
$img.replaceWith($svg);
// console.log($svg);
count = count - 1;
if (!count) {
for (var selector in callableFunctions) {
if ($(selector).length) {
callableFunctions[selector]();
}
}
if (successCallback !== undefined) {
successCallback();
}
}
}, 'xml');
})
}
};
/**/
svg.run({
selector: '._svg'
});
$(document).ready(function() {
$(document).on('click', '._js-menu', function() {
var sidebar = $(this).closest('._js-sidebar');
if (sidebar.hasClass('_js-css-sidebar-close')) {
sidebar.removeClass('_js-css-sidebar-close').addClass('_js-css-sidebar-open')
} else {
sidebar.removeClass('_js-css-sidebar-open').addClass('_js-css-sidebar-close')
}
});
$('._js-sidebar').mCustomScrollbar({
scrollbarPosition: "outside"
});
/*
$("body").on({
mouseenter: function () {
var floor = $(this);
floor.css("opacity","0.5");
const regex = /fl--str-(\w+)--h-(\d+)--f-(\d+)/g;
const str = floor.attr('id');
var m = regex.exec(str);
$('#info').show().css({
top: floor.position().top + "px",
left: floor.position().left + "px"
}).html('ะญัะฐะถ '+ m[3]);
},
mouseleave: function () {
$(this).css("opacity","0");
$('#info').hide()
}
}, "polygon");*/
});
(function() {
function stopVideo() {
var mainVideo = document.getElementById('mainVideo');
if (mainVideo) {
if ($(window).innerWidth() < 768) {
if (mainVideo.autoplay) {
mainVideo.load();
mainVideo.autoplay = false;
}
} else {
if (mainVideo.autoplay == false) {
mainVideo.load();
mainVideo.autoplay = true;
}
}
}
}
$(window).on('resize', function() {
stopVideo();
})
$(document).ready(function() {
stopVideo();
})
})();
(function() {
$('.document__tab').click(function() {
$('.document__tab').removeClass('document__tab-active');
$(this).addClass('document__tab-active');
var tab = $(this).data('href');
$('.document__tab-container').removeClass('active-tab-container');
$(tab).addClass('active-tab-container');
})
})();
(function() {
$('.logging-pagination__item').click(function() {
$('.logging-pagination__item').removeClass('logging-pagination__item-active');
$(this).addClass('logging-pagination__item-active');
})
})();
if ($('#map').length) {
ymaps.ready(initMap);
var myMap,
myPlacemark;
function initMap() {
var map = $('#map'),
lbl = document.querySelector('#map + .map__lbl').outerHTML;
myMap = new ymaps.Map(map.attr('id'), {
center: [map.attr('data-coordX'), map.attr('data-coordY')],
zoom: 15
});
myPlacemark = new ymaps.Placemark([55.76, 37.64], {
balloonContent: lbl
});
myMap.geoObjects.add(myPlacemark);
myMap.controls.add(
new ymaps.control.ZoomControl()
);
myMap.controls.add('mapTools');
}
}
function getFlatSvg() {
var floorId = $('.flatData').data('section');
var floorSvg = '<img src="./assets/img/floor/' + floorId + '.svg" class="flat__floor-img ._svg"/>';
$('.flat__floor-img-box').html(floorSvg);
svg.run({
selector: '.flat__floor-img'
})
}
$(document).ready(function() {
if ($('.flat').length) {
getFlatSvg()
}
});
function highlightSelectedFlat() {
var flatId = $('.flatData').data('flat');
$('#' + flatId).attr('class', 'st2 selectedFlat');
console.log(flatId);
}
function goToFlat() {
location.href = "flat.html"
}
function callback() {
var complex = [];
var houses = $(document).find('#houses--rc-yaroslavsky > *');
var houseInfo = $(document).find('.house')
houses.each(function(index, el) {
houseId = $(el).attr('id');
})
houses.click(function() {
var selectedHouse = $(this).attr('id')
window.location.href = "flatHouse.html?" + "houseId=" + selectedHouse;
});
var flatsBodyTop = $('.flats__body').offset().top;
var flatsBodyLeft = $('.flats__body').offset().left;
houseInfo.each(function(index, el) {
var houseDivStreet = $(el).attr('data-street');
var houseDivCorps = $(el).attr('data-corps');
var houseDivNumber = $(el).attr('data-number');
var houseDivId = $(el).attr('data-id');
var selectHouse = $(document).find('#' + houseDivId);
var houseStatus = $(el).attr('data-status');
selectHouse.attr('class', 'cls-1 ' + houseStatus);
$(el).find('.house_text').html('Ул. ' + houseDivStreet + ' ' + houseDivCorps + ', дом ' + houseDivNumber);
$(el).css({ 'top': selectHouse.position().top - flatsBodyTop - 30, 'left': selectHouse.position().left - flatsBodyLeft });
});
houses.hover(function() {
var id = $(this).attr('id');
$(".house[data-id='" + id + "']").css('opacity', '1');
},
function() {
var id = $(this).attr('id');
$(".house[data-id='" + id + "']").css('opacity', '0');
});
}
$('.flats__title-item').click(function() {
//reset
var houses = $('.houses-img g *');
houses.each(function() {
var houseClass = $(this).attr('class');
var activeClassPos = houseClass.indexOf("active");
if(activeClassPos != -1) {
$(this).attr('class', houseClass.substring(0, activeClassPos));
}
});
$('.house').css('opacity', '0');
//mark
var status = $(this).data('type');
var house = $(document).find('.' + status);
$('.house[data-status='+ status +']').css('opacity', '1');
house.attr('class', house.attr('class') + ' active');
});
$(document).ready(function() {
$(".gallery__carousel").slick({
lazyLoad: 'ondemand',
slidesToShow: 5,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev"><i class="fa fa-angle-left fa-4x" aria-hidden="true"></i></button>',
nextArrow: '<button type="button" class="slick-next"><i class="fa fa-angle-right fa-4x" aria-hidden="true"></i></button>',
responsive: [{
breakpoint: 1700,
settings: {
slidesToShow: 4
}
},
{
breakpoint: 1700,
settings: {
slidesToShow: 3
}
},
{
breakpoint: 1150,
settings: {
slidesToShow: 2
}
},
{
breakpoint: 768,
arrows: false,
settings: {
slidesToShow: 1
}
},
]
});
$(".flat__carousel").slick({
prevArrow: '<button type="button" class="slick-prev">' +
'<img class="slick-prev__ico" src="assets/img/arrow-left-ligth-box.svg"/>' +
'</button>',
nextArrow: '<button type="button" class="slick-next">' +
'<img class="slick-next__ico" src="assets/img/arrow-right-ligth-box.svg"/>' +
'</button>',
});
$('.pulse').click(function() {
$(this).hide();
$(this).next('.pulse__form').fadeIn();
})
$('.pulse__close,.pulse__submit').click(function() {
$(this).parents('.pulse__form').hide();
$(this).parents('.pulse__form').prev('.pulse').fadeIn();
})
});
$('#message-form').submit(function(e) {
e.preventDefault();
var response = '<p class="message__response-title">Спасибо!</p>' +
'<p class="message__response-text">Ваша заявка принята.</p>';
$(this).find('.message__cnt').addClass('message__response');
$(this).find('.message__cnt').html(response);
});
if ($('#about-map').length) {
var myMap
var myPlacemark;
var route;
var routesCollection;
ymaps.ready(initAboutMap);
function addRoute() {
var start = $(".active-route").data('start');
var end = $(".active-route").data('end');
var routes = ymaps.route([start, end]).then(function(router) {
route && myMap.geoObjects.remove(route);
route = router;
var points = route.getWayPoints();
points.get(0).options.set('preset', 'twirl#redStretchyIcon');
points.get(1).options.set('preset', 'twirl#0091EAStretchyIcon');
points.get(0).properties.set('iconContent', 'А');
points.get(1).properties.set('iconContent', 'Б');
route.getPaths().options.set({
strokeColor: '#BA68C8',
opacity: 0.9,
preset: 'twirl#nightStretchyIcon'
});
myMap.geoObjects.add(route);
route.events.add('click', function (e) {
var way = route.getPaths().get(0);
var segments = way.getSegments();
var center = segments[Math.round(way.getSegments().length/2)];
var center1 = Math.floor(center.getCoordinates().length/2);
myMap.setCenter(center.getCoordinates()[center1]);
myMap.setZoom(9);
})
},
function(error) {
alert("ะะพะทะฝะธะบะปะฐ ะพัะธะฑะบะฐ: " + error.message);
});
myMap.geoObjects.add(route);
}
function initAboutMap() {
myMap = new ymaps.Map("about-map", {
center: [55.76, 37.64],
zoom: 15
});
myMap.controls.add(
new ymaps.control.ZoomControl()
);
myMap.controls.add('mapTools');
addRoute();
}
$('.location-block__route').click(function() {
$('.location-block__route').removeClass('active-route');
$(this).addClass('active-route');
addRoute();
})
}
<file_sep>function CalcChords(selector) {
var $this = this;
$this.x = selector.x;
$this.y = selector.y;
$this.width = selector.width;
$this.height = selector.height;
$this.calculate = function (property, param, sectionQty, sectionPos, commonSvgWidth ) {
if(property == 'x' && sectionQty) {
//commonSvgWidth/sectionQty - ัะธัะธะฝะฐ ัะตะบััั
//(sectionQty-sectionPos) - ะบัะปัะบัััั ัะตะบััะน ะพะบััะผ ะดะฐะฝะพั
console.log('property' + '-' + $this[property]);
console.log('commonSvgWidth' + '-' + commonSvgWidth);
console.log('sectionQty' + '-' + sectionQty);
console.log('sectionPos', sectionPos);
return (($this[property] - (commonSvgWidth/sectionQty * (sectionQty-sectionPos))) * 100 / param);
} else {
return $this[property] * 100 / param;
}
}
}
<file_sep>/* ------------------------------- */
//floor slider
/* ------------------------------- */
$(document).ready(function() {
$(".floor-slider").slick({
centerMode: true,
slidesToShow: 3,
vertical: true,
prevArrow: '<button type="button" class="slick-prev"><i class="fa fa-angle-up fa-4x" aria-hidden="true"></i></button>',
nextArrow: '<button type="button" class="slick-next"><i class="fa fa-angle-down fa-4x" aria-hidden="true"></i></button>',
infinite: true
});
});
$('.floor-slider').on('init afterChange', function(event, slick) {
$(".flat-sector__preloader").css('display', 'flex');
createFloor();
});
function createFloor() {
var svgCommon = $('.floor-slider .slick-active.slick-center').attr('data-floorSVG');
var svgImg = '<img src="' + svgCommon + '" class="_svg flat-sector__map-min-svg-img"/>';
$('.flat-sector__map-min-svg').html(svgImg);
svg.run({
selector: '.flat-sector__map-min-svg-img'
});
}
$('.floor-slider').on('beforeChange', function(event, slick) {
$('.flat-sector__number').remove();
});
function goToSelectedFloor() {
if(getURLVar('flat') && getURLVar('floor')) {
$('body').append('<div id="selected-flat" data-flat="'+getURLVar('flat')+'" data-floor="'+ getURLVar('floor') +'"></div>');
window.history.pushState(null, null, "?");
}
try {
var div = $(document).find('#selected-flat');
var floor = div.data('floor');
var flat = div.data('flat');
if(floor && flat) {
var sliderFloor = $('.floor-slider');
$.each(sliderFloor[0].slick.$slides, function(i, slide) {
if ($(slide).find('.floor-number').text() == floor && !$(slide).hasClass('slick-cloned')) {
sliderFloor.slick('slickGoTo', i);
}
});
}
} catch (error) {
setTimeout(goToSelectedFloor, 100);
return;
}
}
goToSelectedFloor();
<file_sep>function getURLVar(key) {
var value = [];
var query = String(document.location).split('?');
if (query[1]) {
var part = query[1].split('&');
for (i = 0; i < part.length; i++) {
var data = part[i].split('=');
if (data[0] && data[1]) {
value[data[0]] = data[1];
}
}
if (value[key]) {
return value[key];
}
return false;
}
return false;
}
|
0953edf5d265733b48109e4231d976536aa8d7c5
|
[
"JavaScript"
] | 8 |
JavaScript
|
IrinaDer/complex
|
11555de6ea3d524aa94ac8d698d9b38c01808b9d
|
960425d4b87bebb0e291f4957f40588c1ac93c25
|
refs/heads/master
|
<file_sep># test_task_regForm
Test task for a job application.
$('.sign__up_href').click(function(e){
});
$('.sign__in_href').click(function(e){
});
$('.log_href').click(function(e){
});
$('.reg_href').click(function(e){
});
});
|
ba600d01d76cb98ec0053ad70f295a13f09d0ff8
|
[
"Markdown",
"JavaScript"
] | 2 |
Markdown
|
AWwebDev/test_task_regForm
|
95fe7451cc775a3a1189a2433e40a2ee359dddf7
|
d4c5c7c218d0ff6d322c72a03450203c191b14e6
|
refs/heads/master
|
<file_sep>List = {};
function List.new ()
return {first = 0, last = -1, size = 0}
end
function List.push (list, value)
local first = list.first - 1
list.first = first
list[first] = value
list.size = list.size + 1;
end
function List.popTop (list)
local first = list.first
if list.size <= 0 then return nil end
local value = list[first]
list[first] = nil -- to allow garbage collection
list.first = first + 1
list.size = list.size - 1;
return value
end
function List.popBottom (list)
local last = list.last
if list.first > last then return nil end
local value = list[last]
list[last] = nil -- to allow garbage collection
list.size = list.size - 1;
list.last = last - 1
return value
end
function Lerp(pos1, pos2, t)
x = (1 - t) * pos1.X + t * pos2.X;
y = (1 - t) * pos1.Y + t * pos2.Y;
return Vector(x, y);
end
-- EndRegion Useful Functions
local chronal_mod = RegisterMod( "Chronal", 1);
local chronal_item = Isaac.GetItemIdByName( "Chronal Accelerator" )
local debugText = "null";
local lastData = nil;
local dataQueue = List.new();
local chargeFrameCount = 0;
local rewinding = false;
local lastFrameRewinding = false;
local rewindFrameCount = 0;
local dataGatheringFrequency = 2;
local framesPerCharge = 15;
function chronal_mod:changePlayerInfos()
local player = Isaac.GetPlayer(0);
local pPos = player.Position;
local newPos = Lerp(pPos, lastData.playerInfos.pos, 1 / (framesPerCharge / 3) );
player.Position = newPos;
player.Velocity = lastData.playerInfos.velocity * (-1 / ( framesPerCharge / 3));
player:AddCoins(lastData.playerInfos.coinCount - player:GetNumCoins() > 0 and lastData.playerInfos.coinCount - player:GetNumCoins() or 0);
player:AddBombs(lastData.playerInfos.bombCount - player:GetNumBombs() > 0 and lastData.playerInfos.bombCount - player:GetNumBombs() or 0);
player:AddKeys(lastData.playerInfos.keyCount - player:GetNumKeys() > 0 and lastData.playerInfos.keyCount - player:GetNumKeys() or 0);
player:AddHearts(lastData.playerInfos.hearts - player:GetHearts() > 0 and lastData.playerInfos.hearts - player:GetHearts() or 0);
local mask;
local beforeBHearts = 0;
local afterBHearts = 0;
mask = lastData.playerInfos.blackHearts;
while (mask > 0) do
if (mask & 1 == 1) then
beforeBHearts = beforeBHearts + 1;
end
mask = mask >> 1;
end
mask = player:GetBlackHearts();
while (mask > 0) do
if (mask & 1 == 1) then
afterBHearts = afterBHearts + 1;
end
mask = mask >> 1;
end
local beforeSHearts = lastData.playerInfos.soulHearts - beforeBHearts;
local afterSHearts = player:GetSoulHearts() - afterBHearts;
player:AddSoulHearts(beforeSHearts - afterSHearts > 0 and beforeSHearts - afterSHearts or 0);
player:AddBlackHearts(beforeBHearts - afterBHearts > 0 and beforeBHearts - afterBHearts or 0);
player:AddEternalHearts(lastData.playerInfos.eternalHearts - player:GetEternalHearts() > 0 and lastData.playerInfos.eternalHearts - player:GetEternalHearts() or 0);
end
function chronal_mod:changeEntitiesInfos()
local ents = Isaac.GetRoomEntities();
for k,v in pairs(ents) do
Isaac.DebugString(k.." "..v.Type);
if v:IsEnemy() or v.Type == EntityType.ENTITY_PROJECTILE then
Isaac.DebugString("FREEZE !");
v:AddFreeze(EntityRef(player), 2);
if (v:IsBoss()) then
Isaac.DebugString("MEGA FREEZE !");
v:AddFreeze(EntityRef(v), 5);
end
end
end
Isaac.DebugString("\n");
end
function chronal_mod:rewind()
if (lastData == nil) then
lastData = List.popTop(dataQueue);
if (lastData == nil) then return end
end
chronal_mod.changePlayerInfos();
chronal_mod.changeEntitiesInfos();
end
function chronal_mod:evaluateRewinding()
player = Isaac.GetPlayer(0);
lastFrameRewinding = rewinding;
if Input.IsActionPressed(ButtonAction.ACTION_ITEM, player.ControllerIndex) and player:GetActiveCharge() > -1 then
rewinding = true;
player.ControlsEnabled = false;
player.EntityCollisionClass = EntityCollisionClass.ENTCOLL_NONE;
else
player.EntityCollisionClass = EntityCollisionClass.ENTCOLL_ALL;
player.ControlsEnabled = true;
rewinding = false;
end
end
function chronal_mod:onUpdate()
local player = Isaac.GetPlayer(0);
debugText = Isaac.GetFrameCount();
if (player:HasCollectible(chronal_item)) then
chronal_mod.evaluateRewinding();
if (lastFrameRewinding == false and rewinding == true) then -- First frame rewinding
chargeFrameCount = 0;
rewindFrameCount = 0;
-- Isaac.DebugString("Decharging First ! "..rewindFrameCount .. " < = > " .. framesPerCharge);
player:SetActiveCharge(player:GetActiveCharge() - 1);
elseif (lastFrameRewinding == true and rewinding == false) then -- Not rewinding anymore
end
if (lastFrameRewinding == true and rewinding == true and rewindFrameCount % framesPerCharge == 0) then -- Second to last rewinding frame
-- Isaac.DebugString("Decharging ! "..rewindFrameCount .. " < = > " .. framesPerCharge);
player:SetActiveCharge(player:GetActiveCharge() - 1);
end
if (rewinding == true) then -- Every rewinding frame
if (rewindFrameCount % (dataGatheringFrequency) == 0) then
lastData = nil;
end
rewindFrameCount = rewindFrameCount + 1;
chronal_mod.rewind();
else -- Rewinding == FALSE
chargeFrameCount = chargeFrameCount + 1;
end
if (chargeFrameCount % dataGatheringFrequency == 0 and chargeFrameCount > dataGatheringFrequency * 10) then
local data = {};
data.playerInfos = {};
data.playerInfos.pos = player.Position;
data.playerInfos.hearts = player:GetHearts();
data.playerInfos.soulHearts = player:GetSoulHearts();
data.playerInfos.eternalHearts = player:GetEternalHearts();
data.playerInfos.blackHearts = player:GetBlackHearts();
data.playerInfos.coinCount = player:GetNumCoins();
data.playerInfos.keyCount = player:GetNumKeys();
data.playerInfos.bombCount = player:GetNumBombs();
data.playerInfos.orientation = player:GetHeadDirection();
data.playerInfos.velocity = player.Velocity;
List.push(dataQueue, data);
if (dataQueue.size > 6 * framesPerCharge / dataGatheringFrequency) then
List.popBottom(dataQueue);
end
end
end
end
function onPostUpdate()
end
function chronal_mod:resetData()
while (dataQueue.size > 0) do
List.popTop(dataQueue);
end
rewinding = false;
lastData = nil;
chargeFrameCount = 0;
rewindFrameCount = 0;
end
-- function chronal_mod:take_damage()
-- if(player:GetActiveItem()== chronal_item) then
-- triggerchronal();
-- end
-- end
function chronal_mod:onInput(entity, hook, action)
if (entity ~= nil) then
local player = entity:ToPlayer();
if player then
if action == ButtonAction.ACTION_ITEM and Input.IsActionPressed(ButtonAction.ACTION_ITEM, player.ControllerIndex) and player:HasCollectible(chronal_item) then
return not Input.GetActionValue(ButtonAction.ACTION_ITEM, player.ControllerIndex);
end
-- if rewinding and action ~= ButtonAction.ACTION_ITEM then
-- if hook == InputHook.GET_ACTION_VALUE then
-- return 0.0;
-- elseif hook == InputHook.IS_ACTION_PRESSED then
-- return false;
-- end
-- end
end
end
end
function chronal_mod:debug_text()
local player = Isaac.GetPlayer(0);
Isaac.RenderText("Charges: " .. dataQueue.size, 400, 50, 255, 0, 0, 255)
Isaac.RenderText("Frame: " .. debugText, 40, 65, 255, 255, 255, 255)
Isaac.RenderText("BH ".. player:GetBlackHearts().." "..player:GetSoulHearts().." "..player:GetEternalHearts(), 40, 75, 255, 255, 255, 255, 255);
Isaac.RenderText("Charged frames ".. chargeFrameCount, 40, 85, 255, 255, 255, 255, 255);
Isaac.RenderText("Rewinding frames ".. framesPerCharge, 40, 100, 255, 255, 255, 255, 255);
Isaac.RenderText("Rewinding ".. (rewinding == true and '1' or '0'), 400, 65, 255, 255, 255, 255, 255);
end
chronal_mod:AddCallback(ModCallbacks.MC_INPUT_ACTION, chronal_mod.onInput);
chronal_mod:AddCallback(ModCallbacks.MC_POST_PEFFECT_UPDATE, chronal_mod.onUpdate);
-- chronal_mod:AddCallback( ModCallbacks.MC_ENTITY_TAKE_DMG, chronal_mod.take_damage, EntityType.ENTITY_PLAYER)
chronal_mod:AddCallback(ModCallbacks.MC_POST_RENDER, chronal_mod.debug_text);
chronal_mod:AddCallback(ModCallbacks.MC_POST_NEW_ROOM, chronal_mod.resetData);
|
75211a6476099b670d10537545cc332f0e7ab2ed
|
[
"Lua"
] | 1 |
Lua
|
mrrinot/Chronal
|
a1c6618d03410e41d09e1ca5d4e92aa81c7bca8e
|
ad5e70d28eb248f6c851f7bdfc7c4e4a7a6d8184
|
refs/heads/master
|
<file_sep><div class="content-wrapper">
<!-- Content Header (Page header) -->
<section class="content-header">
<h1>
Data Pelanggan
<small>ezeelink</small>
</h1>
<ol class="breadcrumb">
<li><a href="#"><i class="fa fa-dashboard"></i> Home</a></li>
<li><a href="#">Data Pelanggan</a></li>
</ol>
</section>
<!-- Main content -->
<section class="content">
<div class="row">
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-aqua"><i class="fa fa-user"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah Pelanggan</span>
<span class="info-box-number"><?php echo $jumlahCust; ?></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-green"><i class="fa fa-flag-o"></i></span>
<div class="info-box-content">
<span class="info-box-text">Kota Dengan</span>
<span class="info-box-text">Pelanggan Terbanyak</span>
<span class="info-box-number">
<?php
echo $highCity->Kota;
?>
</span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-yellow"><i class="fa fa-male"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah </span>
<span class="info-box-text">Pelanggan Pria</span>
<span class="info-box-number">41</span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-red"><i class="fa fa-female"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah </span>
<span class="info-box-text">Pelanggan Wanita </span>
<span class="info-box-number">50</span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-md-3">
<a href="#analisis" class="btn btn-default">Analisis Data Pelanggan</a>
</div>
</div>
<br />
<div class="row">
<div class="col-xs-12">
<div class="box">
<div class="box-header">
<h3 class="box-title">Data Pelanggan</h3>
</div>
<!-- /.box-header -->
<div class="box-body">
<form action="<?php echo $base_url;?>index.php/customers" method="post">
<label>Cari Berdasarkan</label>
<select class="form-control" name="searchField">
<option value="Nama">Nama</option>
<option value="Alamat">Alamat</option>
<option value="Kota">Kota</option>
</select>
<br />
<div class="input-group">
<input type="text" name="q" class="form-control" placeholder="Cari">
<span class="input-group-btn">
<button type="submit" name="search" id="search-btn" class="btn btn-flat"><i class="fa fa-search"></i></button>
</span>
</div>
<br />
<?php
if (!empty($search)){
echo 'Menampilkan Data Pelanggan Dengan <strong>'.$searchField.'</strong> <i>"'.$search.'"</i> ';
echo '<br />';
}
?>
</form>
<!-- <table id="example1" class="table table-bordered table-striped">
</table> -->
<label><?php echo $message; ?></label>
<br />
<form action="<?php echo $base_url;?>index.php/customers/download_pdf" method="get">
<div class="col-md-6">
<input type="submit" id="import" class="btn btn-warnig" value="Import Data yang Dipilih Ke File PDF">
<input type="text" id="jumlah" name="jumlah" value=0 size=5 disabled>
<a href="<?php echo $base_url;?>index.php/customers/download_csv" class="btn btn-info">
<i class="fa fa-file-excel-o"> Import Semua Data Ke File .XLS</i>
</a>
</div>
</form>
<form action="<?php echo $base_url;?>index.php/customers" method="post">
<div class="col-md-6">
<label>Tampilkan Per</label>
<select name="limit">
<option value="10">10</option>
<option value="20">20</option>
<option value="99999999">Semua</option>
</select>
<label>Data</label>
<input type="submit" class="btn btn-default" value="Ok">
</div>
<br />
<div><?php echo $table;?></div>
</form>
<script type="text/javascript">
function clickAll(){
var checked = false;
if (document.getElementById("checkMaster").checked == true)
checked = true;
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
node.checked = checked;
}
cek();
}
function cek(){
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
var jumlah = 0;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
if (node.checked == true)
jumlah++;
document.getElementById('jumlah').value = jumlah;
}
}
</script>
<ul class="pagination pagination-sm">
<li>
<?php
if(empty($search))
echo $pagination;
?>
</li>
</ul>
<br />
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
<!-- /.col -->
<script type="text/javascript" src="<?php echo $base_url;?>assets/jquery.js"></script>
<script type="text/javascript" src="<?php echo $base_url;?>assets/Chart.js"></script>
<div class="col-md-6" id="analisis">
<!-- BAR CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Data Pekerjaan Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="chart">
<canvas id="demoChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
<!-- /.col (LEFT) -->
<div class="col-md-6">
<!-- LINE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Data Transaksi Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="chart">
<canvas id="testChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
<!-- /.box -->
<div class="col-md-6">
<!-- PIE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Data Kota Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="doughnatLegend"></div>
</div>
<div class="chart">
<canvas id="doughnatChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6">
<!-- BAR CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Chart Analisis Rating Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="ratingLegend"></div>
</div>
<div class="chart">
<canvas id="ratingChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6">
<!-- PIE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Data Umur Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="umurLegend"></div>
</div>
<div class="chart">
<canvas id="umurChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
</div>
<script type="text/javascript" src="<?php echo $base_url;?>assets/jquery.js"></script>
<script type="text/javascript" src="<?php echo $base_url;?>assets/Chart.js"></script>
<script type="text/javascript">
$(document).ready(function(){
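// Each chart below uses the Chart.js v1 API (new Chart(ctx).Bar/Line/Pie/Doughnut);
// labels and values are emitted inline by PHP, either json_encode'd in the controller
// or echoed as object literals directly in this view.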
// bar chart: customers per occupation
var data = {
labels: <?php echo $contactTitle; ?>,
datasets: [
{
fillColor: "rgba(220,220,220,0.2)",
strokeColor: "rgba(220,220,220,1)",
pointColor: "rgba(220,220,220,1)",
pointStrokeColor: "#FFF",
pointHighlightFill: "#FFF",
pointHighlightStroke: "rgba(220,220,220,1)",
data: <?php echo $groupContactTitle; ?> }
]
};
var ctx = document.getElementById("demoChart").getContext("2d");
var chart = new Chart(ctx).Bar(data);
// =============================================================
// line chart: customer transactions (currently hard-coded placeholder demo values)
var data2 = {
labels: ["January", "February", "March", "April", "May", "June", "July"],
datasets: [
{
label: "My First dataset",
fillColor: "rgba(220,220,220,0.2)",
strokeColor: "rgba(220,220,220,1)",
pointColor: "rgba(220,220,220,1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(220,220,220,1)",
data: [65, 59, 80, 81, 56, 55, 40]
}
]
};
var ctx2 = document.getElementById("testChart").getContext("2d");
var chart2 = new Chart(ctx2).Line(data2);
// ==============================================================
// pie chart: customers per city
var data4 = [ <?php
$i = 0;
$rand = array('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
foreach($customerKota as $c){
{
$color = '#'.$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)];
$i++;
$dataPie = '{
value: '.$c->Jumlah.',
color:"'.$color.'",
highlight: "'.$color.'",
label: "'.$c->Kota.'"
},';
echo $dataPie;
}
}
?>
{
value: 0,
color: "",
highlight: "",
label: ""
}
]
// doughnut chart for the city distribution
var ctx4 = document.getElementById('doughnatChart').getContext("2d");
var chart4 = new Chart(ctx4).Doughnut(data4);
// ====================================================
// bar chart: customer rating distribution
var data5 = {
labels: ["Bintang 1", "Bintang 2", "Bintang 3", "Bintang 4", "Bintang 5"],
datasets: [
{
fillColor: "rgb(102, 255, 204)",
strokeColor: "#3b8bba",
pointColor: "Blue",
pointStrokeColor: "#FFF",
pointHighlightFill: "#FFF",
pointHighlightStroke: "rgba(220,220,220,1)",
data: <?php echo $groupRatingPelanggan; ?> }
]
};
var ctx5 = document.getElementById("ratingChart").getContext("2d");
var chart5 = new Chart(ctx5).Bar(data5);
// ===================================================
// pie chart: customers per age
var data6 = [ <?php
$i = 0;
$rand = array('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
foreach($groupUmur as $c){
{
$color = '#'.$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)];
$i++;
$dataPie = '{
value: '.$c->Jumlah.',
color:"'.$color.'",
highlight: "'.$color.'",
label: "Umur '.$c->Umur.'"
},';
echo $dataPie;
}
}
?>
{
value: 0,
color: "",
highlight: "",
label: ""
}
]
// pie chart for the age distribution (with a generated legend)
var ctx6 = document.getElementById('umurChart').getContext("2d");
var chart6 = new Chart(ctx6).Pie(data6, {
//String - A legend template
legendTemplate : "<ul class=\"<%=name.toLowerCase()%>-legend\"><% for (var i=0; i<segments.length; i++){%><span style=\"background-color:<%=segments[i].fillColor%>\"><%if(segments[i].label){%><%=segments[i].label%><%}%></span><br /><%}%></ul>"
});
document.getElementById("umurLegend").innerHTML = chart6.generateLegend();
});
</script>
<div class="col-sm-9">
<canvas id="demoChart" width="600" height="350"> </canvas>
</div>
<!-- /.row -->
</section>
<!-- /.content -->
</div><file_sep><?php
class Customers extends CI_Controller{
private $limit = 10;
function __construct(){
parent::__construct();
# load libraries and helpers
$this->load->library(array('table', 'form_validation'));
$this->load->helper(array('form', 'url', 'text'));
$this->load->model('mCustomer','',true);
$this->file_path = realpath(APPPATH . '../assets');
// check login
if (empty($this->session->userdata('user'))){
redirect('main');
}
}
function index($offset=0, $order_column='PelangganID', $order_type='asc'){
if(!empty($this->input->post('limit')))
$this->limit = $this->input->post('limit');
$search = $this->input->post('q');
$data['search'] = $search;
$searchField = $this->input->post('searchField');
$data['searchField'] = $searchField;
$data['base_url'] = $this->config->item('base_url');
if(!empty($search))
$this->limit = $this->mCustomer->count_all();
if (empty($offset)) $offset = 0;
if (empty($order_column)) $order_column = 'PelangganID';
if (empty($order_type)) $order_type = 'asc';
$customers = $this->mCustomer->get_paged_list($this->limit, $offset,
$order_column, $order_type, $search, $searchField)->result();
// generate pagination
$this->load->library('pagination');
$config['base_url'] = site_url('customers/index/');
$config['total_rows'] = $this->mCustomer->count_all();
$config['per_page'] = $this->limit;
$config['uri_segment'] = 3;
$config['first_link'] = 'Awal';
$config['last_link'] = 'Akhir';
$config['attributes'] = array('class' => 'li pagination');
$this->pagination->initialize($config);
$data['pagination'] = $this->pagination->create_links();
// generate table data
$this->load->library('table');
$config['attributes']['rel'] = FALSE;
$template = array(
'table_open' => '<table class="table table-bordered table-striped" id="lineItemTable">',
'table_close' => '</table>'
);
$this->table->set_template($template);
$this->table->set_empty(" ");
$new_order = ($order_type=='asc'?'desc':'asc');
$this->table->set_heading('<input type="checkbox" name="idxall" value="all" id="checkMaster" onclick="clickAll()">','No',
anchor('customers/index/'.$offset.'/CompanyName/'.$new_order,'CompanyName'),
anchor('customers/index/'.$offset.'/Nama/'.$new_order,'Nama'),
anchor('customers/index/'.$offset.'/Alamat/'.$new_order,'Alamat'),
anchor('customers/index/'.$offset.'/Kota/'.$new_order,'Kota'),
'Aksi');
$i=0 + $offset;
foreach ($customers as $customer) {
$this->table->add_row( '<input type="checkbox" name="'.$i.'" value="'.$customer->PelangganID.'" onchange="cek()">',
++$i, $customer->CompanyName, $customer->Nama,
$customer->Alamat, $customer->Kota,
'<a href="'.$this->config->item('base_url').'index.php/customers/profileCustomer/'.$customer->PelangganID.'" class="btn btn-info">Profil</a>'.' '.
'<a href="#"><span class="glyphicon glyphicon-pencil"></span></a>'.' '.
anchor('customers/delete/'.$customer->PelangganID, '<span class="glyphicon glyphicon-remove">', array('onclick' => "return confirm('Apakah Anda Yakin Ingin Menghapus Data Customers Ini ?')"))
);
}
// $data['table'] = highlight_phrase($this->table->generate(), $search, '<span style="color:#990000;">', '</span>');
$data['table'] = $this->table->generate();
if ($this->uri->segment(3) == 'delete_success')
$data['message'] = 'Data Berhasil Dihapus';
else if ($this->uri->segment(3) == 'add_success')
$data['message'] = 'Data Berhasil Ditambah';
else
$data['message'] = '';
$data['jumlahCust'] = $this->mCustomer->count_all();
// data for the customer occupation analysis chart
// -----------------------------------------------
$contactTitle = $this->mCustomer->get_dataPekerjaan()->result();
$groupContactTitle = $this->mCustomer->get_groupPekerjaan()->result();
$dataContactTitle = array();
$dataGroupContactTitle = array();
foreach($contactTitle as $c){
$dataContactTitle[] = $c->Pekerjaan;
}
foreach ($groupContactTitle as $d) {
$dataGroupContactTitle[] = $d->Jumlah;
}
$data['contactTitle'] = json_encode($dataContactTitle);
$data['groupContactTitle'] = json_encode($dataGroupContactTitle);
//-------------------------------------------------
// data for the customer city analysis
// ------------------------------------------------
$kota = $this->mCustomer->get_city()->result();
$data['customerKota'] = $kota; // number of customers per city
// ------------------------------------------------
// data for the city with the most customers
// ----------------------------------------------
$data['highCity'] = $this->mCustomer->get_city()->first_row();
// --------------------------------------------
// data for the customer rating chart
// ----------------------------------------------
$data['groupRatingPelanggan'] = json_encode($this->mCustomer->get_groupRatingPelanggan());
//-----------------------------------------------
// data for the customer age-group chart
// ----------------------------------------------
$data['groupUmur'] = $this->mCustomer->get_groupUmur()->result();
// ---------------------------------------------
$this->load->view('header',$data);
$this->load->view('admin_ezeelink/dataCustomer', $data);
$this->load->view('footer',$data);
}
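// download_csv(): exports every customer row via dbutil->csv_from_result(), writes the
// result under assets/csv_customer/ and forces the browser to download it with an .xls name.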
function download_csv(){
$this->load->model('mCustomer');
$this->load->dbutil();
$this->load->helper('file');
$report = $this->mCustomer->get_all_data();
$delimiter = ",";
$newline = "\r\n";
$new_report = $this->dbutil->csv_from_result($report, $delimiter, $newline);
write_file($this->file_path . '/csv_customer/csv_file.xls', $new_report);
$this->load->helper('download');
$data = file_get_contents($this->file_path . '/csv_customer/csv_file.xls');
$name = 'Customers-'.date('d-m-Y').'.xls';
force_download($name, $data);
}
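// download_pdf(): renders a table with cezpdf. Note: it first loads the full customer
// table and then appends the rows whose IDs were passed via GET (indexes 1..jumlah from
// the checkbox form), so the checked rows currently appear twice in the output.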
function download_pdf(){
$this->load->library('cezpdf');
$db_data = array();
$db_data = $this->mCustomer->get_all_data()->result_array();
$jumlah = $this->input->get('jumlah');
$idx=0;
for($i=0;$i<$jumlah;$i++){
$idx++;
$db_data[] = $this->mCustomer->get_by_id($this->input->get($idx))->row_array();
}
$col_names = array();
$this->cezpdf->ezTable($db_data);
$this->cezpdf->ezStream();
}
function delete($id){
$this->mCustomer->delete($id);
redirect('customers/index/delete_success', 'refresh');
}
function profileCustomer($id){
$data['customer'] = $this->mCustomer->get_by_id($id)->row();
$data['base_url'] = $this->config->item('base_url');
// transaction history data
$customerOrders = $this->mCustomer->get_transaksiPelanggan($id)->result();
$this->load->library('table');
$config['attributes']['rel'] = FALSE;
$template = array(
'table_open' => '<table class="table table-bordered table-striped">',
'table_close' => '</table>'
);
$this->table->set_template($template);
$this->table->set_empty(" ");
$this->table->set_heading('No','Nama Merchant', 'Diskon','Tempat','Tanggal');
$i=0;
foreach ($customerOrders as $c) {
$this->table->add_row(++$i, $c->Nama,
$c->Diskon, $c->TempatTransaksi, $c->TanggalTransaksi
);
}
$data['historiTransaksi'] = $this->table->generate();
// =================================================
// number of this customer's transactions per merchant
$data['customerMerchant'] = $this->mCustomer->get_pelangganMerchant($id)->result();
// =================================================
$data['jumlahTransaksi'] = $i;
$data['rating'] = $this->mCustomer->get_ratingPelanggan($i);
$this->load->view('header',$data);
$this->load->view('admin_ezeelink/profileCustomer', $data);
$this->load->view('footer', $data);
}
}
?><file_sep><div class="content-wrapper">
<!-- Content Header (Page header) -->
<section class="content-header">
<h1>
Profile Customer
<small>Ezeelink</small>
</h1>
<ol class="breadcrumb">
<li><a href="#"><i class="fa fa-dashboard"></i> Home</a></li>
<li><a href="#">Data Customer</a></li>
<li><a href="#">Profile Customer</a></li>
</ol>
</section>
<!-- Main content -->
<section class="content">
<div class="row">
<div class="col-md-3">
<div class="box box-solid">
<!-- Widget: user widget style 1 -->
<div class="box box-widget widget-user">
<!-- Add the bg color to the header using any of the bg-* classes -->
<div class="widget-user-header bg-aqua-active">
<h3 class="widget-user-username"><?php echo $customer->Nama; ?></h3>
<h5 class="widget-user-desc"><?php echo $customer->Alamat; ?></h5>
<div>
<p>
<?php
echo $rating.' ';
for ($i=0;$i<$rating;$i++){
echo '<span class="glyphicon glyphicon-star"></span>';
}
?>
</p>
</div>
</div>
<div class="box-footer">
<div class="row">
<div class="col-sm-4 border-right">
<div class="description-block">
<h5 class="description-header">Jumlah Transaksi</h5>
<span class="description-text"><?php echo $jumlahTransaksi; ?></span>
</div>
<!-- /.description-block -->
</div>
<!-- /.col -->
<div class="col-sm-4 border-right">
<div class="description-block">
<h5 class="description-header">Usia</h5>
<span class="description-text"><?php echo $customer->Umur; ?></span>
</div>
<!-- /.description-block -->
</div>
<!-- /.col -->
<div class="col-sm-4">
<div class="description-block">
<h5 class="description-header">Gender</h5>
<span class="description-text">M</span>
</div>
<!-- /.description-block -->
</div>
<!-- /.col -->
</div>
<!-- /.row -->
</div>
</div>
<!-- /.widget-user -->
</div>
</div>
<!-- /.col -->
<div class="col-md-9">
<div class="box box-primary">
<div class="box-body no-padding">
<div class="box-header with-border">
<h3 class="box-title">Rincian Data</h3>
</div>
<div class="box-body">
<div class="col-md-3 col-sm-4"><span class="fa fa-user"> Nama </span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Nama; ?></div>
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-location-arrow"> Alamat</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Alamat; ?></div>
<br />
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-building-o"> Kota</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Kota; ?></div>
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-phone"> No Telepon</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Telepon; ?></div>
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-arrows"> Tanggal Lahir</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->TanggalLahir; ?></div>
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-user-secret"> Pekerjaan</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Pekerjaan; ?></div>
<br />
<div class="col-md-3 col-sm-4"><span class="fa fa-at"> Email</span></div>
<div class="col-md-3 col-sm-4"><?php echo $customer->Email; ?></div>
<br />
</div>
</div>
</div>
</div>
<!-- /.col -->
<div class="col-md-3">
<div class="box box-warning">
<div class="box-header with-border">
<h3 class="box-title">Jumlah Transaksi Per Merchant</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
<!-- /.box-tools -->
</div>
<!-- /.box-header -->
<div class="box-body">
<?php
foreach ($customerMerchant as $c) {
echo '<div class="row">';
echo '<div class="col-md-3 col-sm-4">';
echo $c->NamaMerchant;
echo "</div>";
echo '<div class="col-md-3 col-sm-4">';
echo '<strong>';
echo $c->Jumlah;
echo '</strong>';
echo "</div>";
echo "</div>";
}
?>
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
<!-- /.col -->
<div class="col-md-6">
<!-- AREA CHART -->
<div class="box box-primary">
<div class="box-header with-border">
<h3 class="box-title">Histori Transaksi</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<?php echo $historiTransaksi ?>
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
</div>
</div><file_sep><?php
class Main extends CI_Controller{
function __construct(){
parent::__construct();
$this->user = unserialize(base64_decode($this->session->userdata('user')));
}
function index(){
$data['user'] = $this->user;
$data['error'] = $this->session->flashdata('err_login');
$data['base_url'] = $this->config->item('base_url');
if (empty($this->session->userdata('user'))){
$this->load->view('login', $data);
}else{
$this->page('admin_ezeelink','dashboard');
}
}
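// page(): loads the 'main' layout, which wraps the view named "$kat/$hal" with the shared header and footer.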
function page($kat,$hal){
$data['user'] = $this->user;
$data['base_url'] = $this->config->item('base_url');
$data['halaman'] = $kat."/".$hal;
$this->load->view('main',$data);
}
function login(){
$data['base_url'] = $this->config->item('base_url');
$this->load->view('login',$data);
}
}
/* End of file */
class MCustomer extends CI_Model{
private $primary_key = 'PelangganID';
private $table_name = 'pelanggan';
function __construct(){
parent::__construct();
}
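// Returns one page of customers. $searchField/$search add a LIKE filter; note that
// ordering by $order_column is only applied while a search term is present, otherwise
// the result falls back to primary-key order.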
function get_paged_list($limit=10, $offset=0, $order_column='', $order_type='asc', $search='', $searchField='Nama')
{
if (empty($searchField))
$searchField = 'Nama';
if(empty($order_column) || empty($order_type) || empty($search) || empty($searchField))
$this->db->order_by($this->primary_key, 'asc');
else
$this->db->order_by($order_column, $order_type);
$this->db->like($searchField, $search);
return $this->db->get($this->table_name, $limit, $offset);
/*
$sql = "";
if(empty($order_column) || empty($order_type) || empty($search))
$sql = "select top ".$limit." * from ".$this->table_name."
where ".$this->primary_key." not in (select top ".$offset." from ".$this->table_name." ) order by ".$this->primary_key." asc ";
else
$sql = "select top ".$limit." * from ".$this->table_name."
where ".$this->primary_key." not in (select top ".$offset." from ".$this->table_name." ) order by ".$order_column." ".$order_type;
return $this->db->query($sql);
*/
}
function count_all(){
return $this->db->count_all($this->table_name);
}
function get_by_id($id){
$sql = "select *, datediff(yy,TanggalLahir,getdate()) as Umur from ".$this->table_name."
where ".$this->primary_key."='".$id."'";
return $this->db->query($sql);
// $this->db->where($this->primary_key, $id);
// return $this->db->get($this->table_name);
}
function save($person){
$this->db->insert($this->table_name, $person);
return $this->db->insert_id();
}
function update($id, $person){
$this->db->where($this->primary_key, $id);
$this->db->update($this->table_name, $person);
}
function delete($id){
$this->db->where($this->primary_key, $id);
$this->db->delete($this->table_name);
}
// demo
function get_dataPekerjaan(){
$sql = "select distinct Pekerjaan from pelanggan";
return $this->db->query($sql);
}
function get_groupPekerjaan(){
$sql = "SELECT COUNT(".$this->primary_key.") as Jumlah FROM ".$this->table_name." GROUP BY Pekerjaan";
return $this->db->query($sql);
}
function get_all_data(){
$sql = "select * from ".$this->table_name;
return $this->db->query($sql);
}
function get_transaksiPelanggan($id){ // get a customer's transaction history
$sql = "select [dbo].[transaksi].*, [dbo].[transaksi_detail].*, [dbo].[merchant].*
from [dbo].[transaksi], [dbo].[transaksi_detail], [dbo].[merchant]
where [transaksi].[PelangganID] = '".$id."' and
[transaksi].[MerchantID] = [merchant].[MerchantID] and
[transaksi].[TransaksiID] = [transaksi_detail].[TransaksiID]";
return $this->db->query($sql);
}
function get_pelangganMerchant($id){ // get the number of the customer's transactions per merchant
$sql= "select merchant.Nama as NamaMerchant, count(merchant.Nama) as Jumlah
from transaksi, transaksi_detail, merchant
where transaksi.PelangganID = '".$id."' and
transaksi.MerchantID = merchant.MerchantID and
transaksi.TransaksiID = transaksi_detail.TransaksiID
group by merchant.Nama";
return $this->db->query($sql);
}
function get_city(){ // get the number of customers per city
$sql ="select Kota, count(PelangganID) as Jumlah from pelanggan group by Kota order by Jumlah desc";
return $this->db->query($sql);
}
function get_customerTransaksi(){ // get the number of transactions per customer
$sql = "select pelanggan.Nama as namaCustomer, count(transaksi_detail.TransaksiID) as jumlahTransaksi
from transaksi, transaksi_detail, merchant, pelanggan
where pelanggan.PelangganID = transaksi.PelangganID and
transaksi.MerchantID = merchant.MerchantID and
transaksi.TransaksiID = transaksi_detail.TransaksiID
group by pelanggan.Nama order by jumlahTransaksi desc";
return $this->db->query($sql);
}
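// Maps a customer's transaction count to a 1-5 star rating by splitting the highest
// per-customer transaction count into five equal bands. Illustrative example (numbers
// are hypothetical): if the busiest customer has 50 transactions, each band is 10 wide,
// so a customer with 23 transactions falls in the third band and gets 3 stars.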
function get_ratingPelanggan($jumlahTransaksi){
$transaksiTerbanyak = $this->get_customerTransaksi()->first_row()->jumlahTransaksi;
$range = $transaksiTerbanyak / 5;
$bintang = array($range * 1, $range * 2, $range * 3,$range * 4, $range * 5);
$indeksBintang = 0;
for ($i=0;$i<5;$i++){
$indeksBintang++;
if ($jumlahTransaksi < $bintang[$i]){
break;
}
}
return $indeksBintang;
}
function get_groupRatingPelanggan(){ // get the data for the customer rating chart
$bintang_satu = 0;
$bintang_dua = 0;
$bintang_tiga = 0;
$bintang_empat = 0;
$bintang_lima = 0;
$transaksiPelanggan = $this->get_customerTransaksi()->result_array();
foreach ($transaksiPelanggan as $t) {
switch ($this->get_ratingPelanggan($t['jumlahTransaksi'])) {
case '1':
$bintang_satu++;
break;
case '2':
$bintang_dua++;
break;
case '3':
$bintang_tiga++;
break;
case '4':
$bintang_empat++;
break;
case '5':
$bintang_lima++;
break;
}
}
$arrGroup = array($bintang_satu, $bintang_dua, $bintang_tiga, $bintang_empat, $bintang_lima);
return $arrGroup;
}
function get_groupUmur(){
$sql = "select datediff(yy,TanggalLahir,getdate()) as Umur,
COUNT(datediff(yy,TanggalLahir,getdate())) as Jumlah
from ".$this->table_name." group by datediff(yy,TanggalLahir,getdate())";
return $this->db->query($sql);
}
}
?><file_sep><?php
class Transaksi extends CI_Controller{
private $limit = 10;
function __construct(){
parent::__construct();
# load libraries and helpers
$this->load->library(array('table', 'form_validation'));
$this->load->helper(array('form', 'url'));
$this->load->model('mTransaksi','',true);
$this->file_path = realpath(APPPATH . '../assets');
// check login
if (empty($this->session->userdata('user'))){
redirect('main');
}
}
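// index(): builds the paged transaction table plus the aggregate datasets used by the
// charts (per-month counts for a selected year, per-weekday counts, the current-month
// total, per-city totals and per-merchant-category totals).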
function index($offset=0, $order_column='TransaksiID', $order_type='asc'){
if(!empty($this->input->post('limit')))
$this->limit = $this->input->post('limit');
$search = $this->input->post('q');
$data['search'] = $search;
$searchField = $this->input->post('searchField');
$data['searchField'] = $searchField;
if(!empty($search))
$this->limit = $this->mTransaksi->count_all();
if (empty($offset)) $offset = 0;
if (empty($order_column)) $order_column = 'TransaksiID';
if (empty($order_type)) $order_type = 'asc';
$paged_transaksi = $this->mTransaksi->get_paged_list($this->limit, $offset,
$order_column, $order_type, $search, $searchField)->result();
// generate pagination
$this->load->library('pagination');
$config['base_url'] = site_url('transaksi/index/');
$config['total_rows'] = $this->mTransaksi->count_all();
$config['per_page'] = $this->limit;
$config['uri_segment'] = 3;
$config['first_link'] = 'Awal';
$config['last_link'] = 'Akhir';
$config['attributes'] = array('class' => 'li pagination');
$this->pagination->initialize($config);
$data['pagination'] = $this->pagination->create_links();
// generate table data
$this->load->library('table');
$config['attributes']['rel'] = FALSE;
$template = array('table_open' => '<table class="table table-bordered table-striped" id="lineItemTable">');
$this->table->set_template($template);
$this->table->set_empty(" ");
$new_order = ($order_type=='asc'?'desc':'asc');
$this->table->set_heading('<input type="checkbox" name="idxall" value="all" id="checkMaster" onclick="clickAll()">', 'No',
anchor('transaksi/index/'.$offset.'/TransaksiID/'.$new_order,'TransaksiID'),
anchor('transaksi/index/'.$offset.'/MerchantID/'.$new_order,'MerchantID'),
anchor('transaksi/index/'.$offset.'/PelangganID/'.$new_order,'PelangganID'),
anchor('transaksi/index/'.$offset.'/TanggalTransaksi/'.$new_order,'TanggalTransaksi'),
anchor('transaksi/index/'.$offset.'/Kota/'.$new_order,'Kota'),
anchor('transaksi/index/'.$offset.'/Provinsi/'.$new_order,'Provinsi'));
$i=0 + $offset;
foreach ($paged_transaksi as $t) {
$this->table->add_row('<input type="checkbox" name="'.$i.'" value="'.$t->TransaksiID.'" onchange="cek()">',
++$i, $t->TransaksiID,
anchor($this->config->item('base_url').'index.php/merchants/profileMerchant/'.$t->MerchantID, $t->MerchantID),
anchor($this->config->item('base_url').'index.php/customers/profileCustomer/'.$t->PelangganID, $t->PelangganID),
$t->TanggalTransaksi, $t->KotaTransaksi, $t->ProvinsiTransaksi);
}
// ===================================================================
// number of transactions per month (chart)
$selectTahun = $this->input->post('s');
if (empty($selectTahun)){
$transaksiPerBulan = $this->mTransaksi->get_transaksiPerbulan()->result();
$data['selectTahun'] = 1998;
}else{
$transaksiPerBulan = $this->mTransaksi->get_transaksiPerbulan($selectTahun)->result();
$data['selectTahun'] = $selectTahun;
}
$jumlah = array();
foreach ($transaksiPerBulan as $t) {
$jumlah[] = $t->Jumlah;
}
$bulan = array('Januari', 'Februari', 'Maret', 'April', 'Mei', 'Juni', 'Juli', 'Agustus', 'September',
'Oktober', 'November', 'Desember');
$label = array();
foreach ($transaksiPerBulan as $t) {
$label[] = $bulan[$t->Bulan - 1];
}
$data['transaksiPerBulan'] = json_encode($jumlah);
$data['labelPerBulan'] = json_encode($label);
$data['tahun'] = $this->mTransaksi->get_tahun()->result();
// end of per-month transaction data (chart)
// ===================================================================
// number of transactions per day of the week
$selectTahun2 = $this->input->post('sTahunHari');
$data['selectTahun2'] = $selectTahun2;
$selectBulanHari = $this->input->post('sBulanHari');
$data['selectBulanHari'] = $selectBulanHari;
if (empty($selectTahun2) || empty($selectBulanHari))
$groupHariTransaksi = $this->mTransaksi->get_groupHariTransaksi()->result();
else
$groupHariTransaksi = $this->mTransaksi->get_groupHariTransaksi($selectBulanHari, $selectTahun2)->result();
$hari = array();
foreach ($groupHariTransaksi as $h) {
$hari[] = $h->Hari;
}
$hariJumlah = array(); // number of transactions per day name
foreach ($groupHariTransaksi as $h) {
$hariJumlah[] = $h->Jumlah;
}
$data['hari'] = json_encode($hari);
$data['hariJumlah'] = json_encode($hariJumlah);
// end of per-day-of-week transaction data
// ==================================================================
// number of transactions in the current month
$data['jumlahTransaksiBulanIni'] = $this->mTransaksi->get_jumlahTransaksiBulanIni()->row();
// -------
// ==================================================================
// number of transactions per city
$data['kotaTransaksi'] = $this->mTransaksi->get_kotaTransaksi()->result();
// -------
// ==================================================================
// city with the most transactions
$data['highKota'] = $this->mTransaksi->get_kotaTransaksi()->first_row()->Nama;
// -------
// ==================================================================
// number of transactions per merchant category (chart)
$transaksiKategoriMerchant = $this->mTransaksi->get_transaksiKategoriMerchant()->result();
$transaksiKategori = array();
$transaksiKategoriJumlah = array();
foreach ($transaksiKategoriMerchant as $t) {
$transaksiKategori[] = $t->Kategori;
}
foreach ($transaksiKategoriMerchant as $t) {
$transaksiKategoriJumlah[] = $t->Jumlah;
}
$data['transaksiKategori'] = json_encode($transaksiKategori);
$data['transaksiKategoriJumlah'] = json_encode($transaksiKategoriJumlah);
// ------
$data['table'] = $this->table->generate();
$data['base_url'] = $this->config->item('base_url');
$this->load->view('header', $data);
$this->load->view('admin_ezeelink/dataTransaksi', $data);
$this->load->view('footer', $data);
}
function download_csv(){
$this->load->dbutil();
$this->load->helper('file');
$report = $this->mTransaksi->get_all_data();
$delimiter = ",";
$newline = "\r\n";
$new_report = $this->dbutil->csv_from_result($report, $delimiter, $newline);
write_file($this->file_path . '/csv_transaksi/csv_file.xls', $new_report);
$this->load->helper('download');
$data = file_get_contents($this->file_path . '/csv_transaksi/csv_file.xls');
$name = 'Transaksi-'.date('d-m-Y').'.xls';
force_download($name, $data);
}
}
?><file_sep><?php
class MTransaksi extends CI_Model{
private $table_name = 'transaksi';
private $primary_key = 'TransaksiID';
function __construct(){
parent::__construct();
}
function get_paged_list($limit=10, $offset=0, $order_column='', $order_type='asc', $search='', $searchField='TransaksiID')
{
if (empty($searchField))
$searchField = $this->primary_key;
if(empty($order_column) || empty($order_type) || empty($search))
$this->db->order_by($this->primary_key, 'asc');
else
$this->db->order_by($order_column, $order_type);
$this->db->like($searchField, $search);
return $this->db->get($this->table_name, $limit, $offset);
}
function count_all(){
return $this->db->count_all($this->table_name);
}
function get_all_data(){
$sql = "select * from ".$this->table_name;
return $this->db->query($sql);
}
function get_transaksiPerbulan($tahun=1998){
$sql = "";
if(empty($tahun))
$sql = "select MONTH(TanggalTransaksi)as Bulan, COUNT(TanggalTransaksi)as Jumlah
from transaksi where year(TanggalTransaksi) = 1998 group by MONTH(TanggalTransaksi)";
else
$sql = "select MONTH(TanggalTransaksi)as Bulan, COUNT(TanggalTransaksi)as Jumlah
from transaksi where year(TanggalTransaksi) = ".$tahun." group by MONTH(TanggalTransaksi)";
return $this->db->query($sql);
}
function get_tahun(){
$sql = "select distinct YEAR(TanggalTransaksi) as Tahun from transaksi order by Tahun desc";
return $this->db->query($sql);
}
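// Counts transactions per day-of-week name. The CASE expression maps each day name to
// 1-7 so rows come back in Sunday..Saturday order; when $bulan/$tahun are empty the
// filter collapses to MONTH(...) = MONTH(...) / YEAR(...) = YEAR(...), i.e. no restriction.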
function get_groupHariTransaksi($bulan='',$tahun=''){ // get the number of transactions per day-of-week name
if (empty($bulan))
$bulan = 'MONTH(TanggalTransaksi)';
if (empty($tahun))
$tahun = 'YEAR(TanggalTransaksi)';
$sql = "SELECT datename(dw,TanggalTransaksi) as Hari,
(CASE WHEN DATENAME(dw, TanggalTransaksi)= 'Sunday' then 1
WHEN DATENAME(dw, TanggalTransaksi)='Monday' THEN 2
WHEN DATENAME(dw, TanggalTransaksi)='Tuesday' THEN 3
WHEN DATENAME(dw, TanggalTransaksi)='Wednesday' THEN 4
WHEN DATENAME(dw, TanggalTransaksi)='Thursday' THEN 5
WHEN DATENAME(dw, TanggalTransaksi)='Friday' THEN 6 ELSE 7 END ) as indexHari,
COUNT(TransaksiID) as Jumlah
FROM transaksi where MONTH(TanggalTransaksi) = ".$bulan." and
YEAR(TanggalTransaksi) = ".$tahun."
group by datename(dw,TanggalTransaksi) order by indexHari";
return $this->db->query($sql);
}
function get_jumlahTransaksiBulanIni(){
$sql = "select count(TransaksiID) as Jumlah from transaksi
where MONTH(TanggalTransaksi) = MONTH(getDate()) and
YEAR(TanggalTransaksi) = YEAR(getDate())";
return $this->db->query($sql);
}
function get_kotaTransaksi(){ // get the number of transactions per city
$sql ="select KotaTransaksi as Nama, count(TransaksiID) as Jumlah
from transaksi group by KotaTransaksi order by Jumlah desc";
return $this->db->query($sql);
}
function get_transaksiKategoriMerchant(){ // get the number of transactions per merchant category
$sql = "select kategori_merchant.Nama as Kategori,
COUNT(kategori_merchant.Nama) as Jumlah
from transaksi, merchant, kategori_merchant
where transaksi.MerchantID = merchant.MerchantID and
merchant.KategoriID = kategori_merchant.KategoriID
group by kategori_merchant.Nama";
return $this->db->query($sql);
}
}
?><file_sep><?php
class Merchants extends CI_Controller{
private $limit = 10;
function __construct(){
parent::__construct();
# load libraries and helpers
$this->load->library(array('table', 'form_validation'));
$this->load->helper(array('form', 'url'));
$this->load->model('mMerchant','',true);
$this->file_path = realpath(APPPATH . '../assets');
// check login
if (empty($this->session->userdata('user'))){
redirect('main');
}
}
function addMerchant(){
// TODO: add-merchant handling is not implemented yet
}
function index($offset=0, $order_column='MerchantID', $order_type='asc'){
if(!empty($this->input->post('limit')))
$this->limit = $this->input->post('limit');
$search = $this->input->post('q');
$data['base_url'] = $this->config->item('base_url');
$data['search'] = $search;
$searchField = $this->input->post('searchField');
$data['searchField'] = $searchField;
if(!empty($search))
$this->limit = $this->mMerchant->count_all();
if (empty($offset)) $offset = 0;
if (empty($order_column)) $order_column = 'MerchantID';
if (empty($order_type)) $order_type = 'asc';
$paged_merchant = $this->mMerchant->get_paged_list($this->limit, $offset, $order_column,
$order_type, $search, $searchField)->result();
// generate pagination
$this->load->library('pagination');
$config['base_url'] = site_url('merchants/index/');
$config['total_rows'] = $this->mMerchant->count_all();
$config['per_page'] = $this->limit;
$config['uri_segment'] = 3;
$config['first_link'] = 'Awal';
$config['last_link'] = 'Akhir';
$config['attributes'] = array('class' => 'li pagination');
$this->pagination->initialize($config);
$data['pagination'] = $this->pagination->create_links();
// generate table data
$this->load->library('table');
$config['attributes']['rel'] = FALSE;
$template = array('table_open' => '<table class="table table-bordered table-striped" id="lineItemTable">');
$this->table->set_template($template);
$this->table->set_empty(" ");
$new_order = ($order_type=='asc'?'desc':'asc');
$this->table->set_heading('<input type="checkbox" name="idxall" value="all" id="checkMaster" onclick="clickAll()">', 'No',
anchor('merchants/index/'.$offset.'/Nama/'.$new_order,'Nama'),
anchor('merchants/index/'.$offset.'/Alamat/'.$new_order,'Alamat'),
anchor('merchants/index/'.$offset.'/TanggalDaftar/'.$new_order,'TanggalDaftar'),
anchor('merchants/index/'.$offset.'/Kota/'.$new_order,'Kota'),
anchor('merchants/index/'.$offset.'/Telepon/'.$new_order,'Telepon'),
'Aksi');
$i=0 + $offset;
foreach ($paged_merchant as $m) {
$this->table->add_row('<input type="checkbox" name="'.$i.'" value="'.$m->MerchantID.'" onchange="cek()">',
++$i, $m->Nama, $m->Alamat,
$m->TanggalDaftar, $m->Kota, $m->Telepon,
'<a href="'.$this->config->item('base_url').'index.php/merchants/profileMerchant/'.$m->MerchantID.'" class="btn btn-info">Profil</a>'.' '.
'<a href="#"><span class="glyphicon glyphicon-pencil"></span></a>'.' '.
anchor('merchants/delete/'.$m->MerchantID, '<span class="glyphicon glyphicon-remove">', array('onclick' => "return confirm('Apakah Anda Yakin Ingin Menghapus Data Merchant Ini ?')"))
);
}
$data['table'] = $this->table->generate();
if ($this->uri->segment(3) == 'delete_success')
$data['message'] = 'Data Berhasil Dihapus';
else if ($this->uri->segment(3) == 'add_success')
$data['message'] = 'Data Berhasil Ditambah';
else
$data['message'] = '';
$data['jumlahMerchant'] = $this->mMerchant->count_all();
// ==================================================
// number of merchants per city (chart)
$data['merchantKota'] = $this->mMerchant->get_kota()->result();
// ===================================================
// city with the most merchants
$data['highKota'] = $this->mMerchant->get_kota()->first_row()->Kota;
// ===================================================
// number of merchants per category
$data['merchantKategori'] = $this->mMerchant->get_kategoriMerchant()->result();
$this->load->view('header', $data);
$this->load->view('admin_ezeelink/dataMerchant', $data);
$this->load->view('footer', $data);
}
function download_csv(){
$this->load->dbutil();
$this->load->helper('file');
$report = $this->mMerchant->get_all_data();
$delimiter = ",";
$newline = "\r\n";
$new_report = $this->dbutil->csv_from_result($report, $delimiter, $newline);
write_file($this->file_path . '/csv_merchant/csv_file.xls', $new_report);
$this->load->helper('download');
$data = file_get_contents($this->file_path . '/csv_merchant/csv_file.xls');
$name = 'Merchants-'.date('d-m-Y').'.xls';
force_download($name, $data);
}
function profileMerchant($id){
$data['base_url'] = $this->config->item('base_url');
// ==================================================
// merchant detail data
$data['merchant'] = $this->mMerchant->get_by_id($id)->row();
// ==================================================
// ==================================================
// table of the merchant's transaction history
$merchantTransaksi = $this->mMerchant->get_merchantTransaksi($id)->result();
$this->load->library('table');
$config['attributes']['rel'] = FALSE;
$template = array(
'table_open' => '<table class="table table-bordered table-striped">',
'table_close' => '</table>'
);
$this->table->set_template($template);
$this->table->set_empty(" ");
$this->table->set_heading('No','Nama Customer', 'Produk', 'Harga', 'Kuantitas','Diskon','Tempat','Tanggal');
$i=0;
foreach ($merchantTransaksi as $c) {
$this->table->add_row(++$i, $c->namaCustomer, $c->NamaProduk, $c->HargaPerUnit , $c->Kuantitas,
$c->Diskon, $c->TempatTransaksi, $c->TanggalTransaksi
);
}
$data['historiTransaksi'] = $this->table->generate();
// ==================================================
// number of the merchant's transactions
$data['jumlahTransaksi'] = $i;
// ==================================================
// merchant rating data
$data['ratingMerchant'] = $this->mMerchant->get_ratingMerchant($i);
$this->load->view('header', $data);
$this->load->view('admin_ezeelink/profileMerchant', $data);
$this->load->view('footer', $data);
}
}
?> <file_sep><!-- Content Wrapper. Contains page content -->
<div class="content-wrapper">
<!-- Content Header (Page header) -->
<section class="content-header">
<h1>
Profile Merchant
</h1>
<ol class="breadcrumb">
<li><a href="#"><i class="fa fa-dashboard"></i> Home</a></li>
<li><a href="#">Data Merchant</a></li>
<li class="active">Profile Merchant</li>
</ol>
</section>
<!-- Main content -->
<section class="content">
<div class="row">
<div class="col-md-3">
<!-- Profile Image -->
<div class="box box-primary">
<div class="box-body box-profile">
<img class="profile-user-img img-responsive img-circle" src="" alt="Logo Merhant">
<h3 class="profile-username text-center"><?php echo $merchant->Nama; ?></h3>
<p class="text-muted text-center"><?php echo $merchant->Kategori; ?></p>
<ul class="list-group list-group-unbordered">
<li class="list-group-item">
<b>Jumlah Transaksi</b> <label class="pull-right"><?php echo $jumlahTransaksi; ?></label>
</li>
<li class="list-group-item">
<b>Rating</b> <label class="pull-right"><?php echo $ratingMerchant; for ($i=0;$i<$ratingMerchant;$i++){
echo '<span class="glyphicon glyphicon-star"></span>';
} ?></label>
</li>
</ul>
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
<!-- About Me Box -->
<div class="box box-primary">
<div class="box-header with-border">
<h3 class="box-title">Detail</h3>
</div>
<!-- /.box-header -->
<div class="box-body">
<strong><i class="fa fa-book margin-r-5"></i> Tanggal Daftar</strong>
<p class="text-muted"><?php echo $merchant->TanggalDaftar; ?></p>
<hr>
<strong><i class="fa fa-map-marker margin-r-5"></i> Lokasi</strong>
<p class="text-muted"><?php echo $merchant->Alamat.'\n '.$merchant->Kota.'\n '.$merchant->Provinsi; ?></p>
<hr>
<strong><i class="fa fa-pencil margin-r-5"></i> Kategori</strong>
<p><?php echo $merchant->Kategori; ?></p>
<hr>
<strong><i class="fa fa-file-text-o margin-r-5"></i> Catatan</strong>
<p><?php echo $merchant->Catatan; ?></p>
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
<!-- /.col -->
<div class="col-md-9">
<div class="nav-tabs-custom">
<ul class="nav nav-tabs">
<li class="active"><a href="#activity" data-toggle="tab">Histori Transaksi</a></li>
<li><a href="#timeline" data-toggle="tab">Program</a></li>
</ul>
<div class="tab-content">
<div class="active tab-pane" id="activity">
<?php echo $historiTransaksi; ?>
</div>
<!-- /.tab-pane -->
<div class="tab-pane" id="timeline">
</div>
<!-- /.tab-pane -->
</div>
<!-- /.tab-content -->
</div>
<!-- /.nav-tabs-custom -->
</div>
<!-- /.col -->
</div>
<!-- /.row -->
</section>
<!-- /.content -->
</div>
<!-- /.content-wrapper --><file_sep> <?php
if (!defined('BASEPATH'))
{exit('No direct script access allowed');}
class mAuthent extends CI_Model
{
function __construct()
{
parent::__construct();
}
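// Looks up an active account (status=1) by its user_id in ss_user and returns the row
// as an array; note that the id is interpolated directly into the SQL string.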
function get_user($userid)
{
$sql = $this->db->query("select *
from ss_user
where
user_id='$userid' and status=1");
return $sql->row_array();
}
}
/* End of file mAuthent.php */
/* Location: ./application/models/mAuthent.php */<file_sep><div class="content-wrapper">
<!-- Content Header (Page header) -->
<section class="content-header">
<h1>
Data Merchant
<small>ezeelink</small>
</h1>
<ol class="breadcrumb">
<li><a href="#"><i class="fa fa-dashboard"></i> Home</a></li>
<li><a href="#">Data Customer</a></li>
</ol>
</section>
<!-- Main content -->
<section class="content">
<div class="row">
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-gray"><i class="fa fa-bank"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah Merchant</span>
<span class="info-box-number"><?php echo $jumlahMerchant; ?></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-blue"><i class="fa fa-flag-o"></i></span>
<div class="info-box-content">
<span class="info-box-text">Kota Dengan</span>
<span class="info-box-text">Merchant Terbanyak</span>
<span class="info-box-number"><?php echo $highKota; ?></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-red"><i class="fa fa-send"></i></span>
<div class="info-box-content">
<span class="info-box-text">Merchant </span>
<span class="info-box-text">Di Jakarta</span>
<span class="info-box-number"></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-green"><i class="fa fa-reply"></i></span>
<div class="info-box-content">
<span class="info-box-text">Merchant </span>
<span class="info-box-text">Luar Jakarta</span>
<span class="info-box-number"></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-md-3">
<a href="#analisis" class="btn btn-default">Analisis Data Merchant</a>
</div>
</div>
<br />
<div class="row">
<div class="col-xs-12">
<div class="box">
<div class="box-header">
<h3 class="box-title">Data Merhcant</h3>
</div>
<!-- /.box-header -->
<div class="box-body">
<form action="<?php echo $base_url;?>index.php/merchants" method="post">
<label>Cari Berdasarkan</label>
<select class="form-control" name="searchField">
<option value="Nama">Nama</option>
<option value="Alamat">Alamat</option>
<option value="Kota">Kota</option>
</select>
<br />
<div class="input-group">
<input type="text" name="q" class="form-control" placeholder="Cari">
<span class="input-group-btn">
<button type="submit" name="search" id="search-btn" class="btn btn-flat"><i class="fa fa-search"></i></button>
</span>
</div>
<br />
<?php
if (!empty($search)){
echo 'Menampilkan Data Merchant Dengan <strong>'.$searchField.'</strong> <i>"'.$search.'"</i> ';
echo '<br />';
}
?>
</form>
<!-- <table id="example1" class="table table-bordered table-striped">
</table> -->
<label><?php //echo $message; ?></label>
<br />
<form action="<?php echo $base_url;?>index.php/customers/download_pdf" method="get">
<div class="col-md-6">
<input type="submit" id="import" class="btn btn-warnig" value="Import Data yang Dipilih Ke File PDF">
<input type="text" id="jumlah" name="jumlah" value=0 size=5 disabled>
<a href="<?php echo $base_url;?>index.php/customers/download_csv" class="btn btn-info">
<i class="fa fa-file-excel-o"> Import Semua Data Ke File .XLS</i>
</a>
</div>
</form>
<form action="<?php echo $base_url;?>index.php/merchants" method="post">
<div class="col-md-6">
<label>Tampilkan Per</label>
<select name="limit">
<option value="10">10</option>
<option value="20">20</option>
<option value="99999999">Semua</option>
</select>
<label>Data</label>
<input type="submit" class="btn btn-default" value="Ok">
</div>
<br />
<div><?php echo $table;?></div>
</form>
<script type="text/javascript">
function clickAll(){
var checked = false;
if (document.getElementById("checkMaster").checked == true)
checked = true;
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
node.checked = checked;
}
cek();
}
function cek(){
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
var jumlah = 0;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
if (node.checked == true)
jumlah++;
document.getElementById('jumlah').value = jumlah;
}
}
</script>
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
<!-- /.col -->
<script type="text/javascript" src="<?php echo $base_url;?>assets/jquery.js"></script>
<script type="text/javascript" src="<?php echo $base_url;?>assets/Chart.js"></script>
<script type="text/javascript">
$(document).ready(function(){
// ==============================================================
// pie chart: merchants per city
var data = [ <?php
$i = 0;
$rand = array('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
foreach($merchantKota as $c){
{
$color = '#'.$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)];
$i++;
$dataPie = '{
value: '.$c->Jumlah.',
color:"'.$color.'",
highlight: "'.$color.'",
label: "'.$c->Kota.'"
},';
echo $dataPie;
}
}
?>
{
value: 0,
color: "",
highlight: "",
label: ""
}
]
var ctx = document.getElementById("pieChart").getContext("2d");
var chart = new Chart(ctx).Pie(data, {
//String - A legend template
legendTemplate : "<ul class=\"<%=name.toLowerCase()%>-legend\"><% for (var i=0; i<segments.length; i++){%><span style=\"background-color:<%=segments[i].fillColor%>\"><%if(segments[i].label){%><%=segments[i].label%><%}%></span><%}%></ul>"
});
document.getElementById("pieLegend").innerHTML = chart.generateLegend();
// ==============================================================
// pie chart: merchants per category
var data2 = [ <?php
$i = 0;
$rand = array('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
foreach($merchantKategori as $c){
{
$color = '#'.$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)];
$i++;
$dataPie = '{
value: '.$c->Jumlah.',
color:"'.$color.'",
highlight: "'.$color.'",
label: "'.$c->Nama.'"
},';
echo $dataPie;
}
}
?>
{
value: 0,
color: "",
highlight: "",
label: ""
}
]
var ctx2 = document.getElementById("pieChartKategori").getContext("2d");
var chart2 = new Chart(ctx2).Pie(data2, {
//String - A legend template
legendTemplate : "<ul class=\"<%=name.toLowerCase()%>-legend\"><% for (var i=0; i<segments.length; i++){%><span style=\"background-color:<%=segments[i].fillColor%>\"><%if(segments[i].label){%><%=segments[i].label%><%}%></span><%}%></ul>"
});
document.getElementById("pieLegendKategori").innerHTML = chart2.generateLegend();
});
</script>
<div class="col-md-6" id="analisis">
<!-- PIE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Chart Analisis Kota Merchant</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="pieLegend"></div>
</div>
<div class="chart">
<canvas id="pieChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6">
<!-- PIE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Chart Analisis Kategori Merchant</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="pieLegendKategori"></div>
</div>
<div class="chart">
<canvas id="pieChartKategori" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
</div>
<!-- /.row -->
</section>
<!-- /.content -->
</div><file_sep><?php $this->load->view('header'); ?>
<!-- Content Wrapper. Contains page content -->
<?php $this->load->view($halaman); ?>
<!-- /.content-wrapper -->
<?php $this->load->view('footer'); ?><file_sep><?php
class MMerchant extends CI_Model{
private $primary_key = 'MerchantID';
private $table_name = 'merchant';
function __construct(){
parent::__construct();
}
function get_paged_list($limit=10, $offset=0, $order_column='', $order_type='asc', $search='', $searchField='Nama')
{
if (empty($searchField))
$searchField = 'Nama';
if(empty($order_column) || empty($order_type) || empty($search))
$this->db->order_by($this->primary_key, 'asc');
else
$this->db->order_by($order_column, $order_type);
$this->db->like($searchField, $search);
return $this->db->get($this->table_name, $limit, $offset);
}
function get_all_data(){
$sql = "select * from ".$this->table_name;
return $this->db->query($sql);
}
function get_by_id($id){
$sql = "select kategori_merchant.Nama as Kategori, merchant.*
from kategori_merchant, merchant
where merchant.KategoriID = kategori_merchant.KategoriID and
merchant.MerchantID =".$id;
return $this->db->query($sql);
}
function count_all(){
return $this->db->count_all($this->table_name);
}
function delete($id){
$this->db->where($this->primary_key, $id);
$this->db->delete($this->table_name);
}
function get_merchantTransaksi($id){ // get the transaction history for a given merchant
$sql = "select [dbo].[pelanggan].[Nama] as namaCustomer,
[dbo].[transaksi].[TempatTransaksi], [dbo].[transaksi_detail].[Diskon], [dbo].[transaksi_detail].[Kuantitas],
[dbo].[transaksi].[TanggalTransaksi], [dbo].[produk].*
from [dbo].[transaksi], [dbo].[transaksi_detail], [dbo].[merchant], [dbo].[pelanggan], [dbo].[produk]
where [merchant].[MerchantID] = ".$id." and
[transaksi].[MerchantID] = [merchant].[MerchantID] and
[transaksi].[TransaksiID] = [transaksi_detail].[TransaksiID] and
[transaksi].[PelangganID] = [pelanggan].[PelangganID] and
[transaksi].[TransaksiID] = [transaksi_detail].[TransaksiID] and
[transaksi_detail].[ProdukID] = [produk].[ProdukID]
order by [dbo].[transaksi].[TanggalTransaksi] desc";
return $this->db->query($sql);
}
function get_jumlahMerchantTransaksi(){ // get the number of transactions per merchant
$sql = "select merchant.Nama as namaMerchant, count(transaksi_detail.TransaksiID) as jumlahTransaksi
from transaksi, transaksi_detail, merchant, pelanggan
where pelanggan.PelangganID = transaksi.PelangganID and
transaksi.MerchantID = merchant.MerchantID and
transaksi.TransaksiID = transaksi_detail.TransaksiID
group by merchant.Nama order by jumlahTransaksi desc";
return $this->db->query($sql);
}
function get_ratingMerchant($jumlahTransaksi){
$transaksiTerbanyak = $this->get_jumlahMerchantTransaksi()->first_row()->jumlahTransaksi;
$range = $transaksiTerbanyak / 5;
$bintang = array($range * 1, $range * 2, $range * 3,$range * 4, $range * 5);
$indeksBintang = 0;
for ($i=0;$i<5;$i++){
$indeksBintang++;
if ($jumlahTransaksi < $bintang[$i]){
break;
}
}
return $indeksBintang;
}
function get_kota(){ // get the number of merchants per city
$sql = "select Kota, count(MerchantID) as Jumlah from merchant group by Kota order by Jumlah desc";
return $this->db->query($sql);
}
function get_kategoriMerchant(){ // get the number of merchants per category
$sql = "select kategori_merchant.Nama, COUNT(merchant.KategoriID)as Jumlah
from merchant, kategori_merchant
where merchant.KategoriID = kategori_merchant.KategoriID group by kategori_merchant.Nama
order by Jumlah desc";
return $this->db->query($sql);
}
}
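/*
 * Minimal usage sketch (hypothetical, not part of this repo): a CodeIgniter
 * controller would typically load this model and hand the query results to a
 * view. The view name 'merchant_list' below is an assumption for illustration;
 * only methods defined in this model are called.
 *
 *   $this->load->model('MMerchant');
 *   $merchants = $this->MMerchant->get_paged_list(10, 0, 'Nama', 'asc')->result();
 *   $jumlah    = $this->MMerchant->get_jumlahMerchantTransaksi()->first_row()->jumlahTransaksi;
 *   $rating    = $this->MMerchant->get_ratingMerchant($jumlah);
 *   $this->load->view('merchant_list', array('merchants' => $merchants, 'rating' => $rating));
 */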
?><file_sep><div class="content-wrapper">
<!-- Content Header (Page header) -->
<section class="content-header">
<h1>
Data Transaksi
<small>ezeelink</small>
</h1>
<ol class="breadcrumb">
<li><a href="#"><i class="fa fa-dashboard"></i> Home</a></li>
<li><a href="#">Data Transaksi</a></li>
</ol>
</section>
<section class="content">
<div class="row">
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-green"><i class="fa fa-dollar"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah Transaksi</span>
<span class="info-box-text">Bulan Sekarang</span>
<span class="info-box-number"><?php echo $jumlahTransaksiBulanIni->Jumlah; ?></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-yellow"><i class="fa fa-flag-o"></i></span>
<div class="info-box-content">
<span class="info-box-text">Kota Dengan</span>
<span class="info-box-text">Transaksi Terbanyak</span>
<span class="info-box-number"><?php echo $highKota; ?></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-red"><i class="fa fa-send"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah Collect </span>
<span class="info-box-text">Point</span>
<span class="info-box-number"></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
<div class="col-md-3 col-sm-6 col-xs-12">
<div class="info-box">
<span class="info-box-icon bg-aqua"><i class="fa fa-reply"></i></span>
<div class="info-box-content">
<span class="info-box-text">Jumlah Radeem </span>
<span class="info-box-text">Point </span>
<span class="info-box-number"></span>
</div>
<!-- /.info-box-content -->
</div>
<!-- /.info-box -->
</div>
<!-- /.col -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-md-3">
<a href="#analisis" class="btn btn-default">Analisis Data Transaksi</a>
</div>
</div>
<br />
<div class="row">
<div class="col-xs-12">
<div class="box">
<div class="box-header">
<h3 class="box-title">Data Transaksi</h3>
</div>
<!-- /.box-header -->
<div class="box-body">
<form action="<?php echo $base_url;?>index.php/transaksi" method="post">
<label>Cari Berdasarkan</label>
<select class="form-control" name="searchField">
<option value="TransaksiID">ID Transaksi</option>
<option value="MerchantID">ID Merchant</option>
<option value="PelangganID">ID Pelanggan</option>
</select>
<br />
<div class="input-group">
<input type="text" name="q" class="form-control" placeholder="Cari">
<span class="input-group-btn">
<button type="submit" name="search" id="search-btn" class="btn btn-flat"><i class="fa fa-search"></i></button>
</span>
</div>
<br />
<?php
if (!empty($search)){
echo 'Menampilkan Data Transaksi Dengan <strong>'.$searchField.'</strong> <i>"'.$search.'"</i> ';
echo '<br />';
}
?>
</form>
<!-- <table id="example1" class="table table-bordered table-striped">
</table> -->
<label><?php //echo $message; ?></label>
<br />
<form action="<?php echo $base_url;?>index.php/customers/download_pdf" method="get">
<div class="col-md-6">
<input type="submit" id="import" class="btn btn-warnig" value="Import Data yang Dipilih Ke File PDF">
<input type="text" id="jumlah" name="jumlah" value=0 size=5 disabled>
<a href="<?php echo $base_url;?>index.php/customers/download_csv" class="btn btn-info">
<i class="fa fa-file-excel-o"> Import Semua Data Ke File .XLS</i>
</a>
</div>
</form>
<form action="<?php echo $base_url;?>index.php/transaksi" method="post">
<div class="col-md-6">
<label>Tampilkan Per</label>
<select name="limit">
<option value="10">10</option>
<option value="20">20</option>
<option value="99999999">Semua</option>
</select>
<label>Data</label>
<input type="submit" class="btn btn-default" value="Ok">
</div>
<br />
<div><?php echo $table;?></div>
</form>
<script type="text/javascript">
function clickAll(){
var checked = false;
if (document.getElementById("checkMaster").checked == true)
checked = true;
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
node.checked = checked;
}
cek();
}
function cek(){
var tbl = document.getElementById("lineItemTable");
var rowLen = tbl.rows.length;
var jumlah = 0;
for (var idx = 1; idx < rowLen; idx++) {
var row = tbl.rows[idx];
var cell = row.cells[0];
var node = cell.lastChild;
if (node.checked == true)
jumlah++;
document.getElementById('jumlah').value = jumlah;
}
}
</script>
<ul class="pagination pagination-sm"><li><?php echo $pagination;?></li></ul>
<br />
</div>
<!-- /.box-body -->
</div>
<!-- /.box -->
</div>
<!-- /.col -->
<div class="col-md-6" id="analisis">
<!-- LINE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Jumlah Transaksi Per-bulan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<form action="<?php echo $base_url;?>index.php/transaksi#analisis" method="post">
<div class="form-group">
<label>Tahun</label>
<select class="form-control" name="s">
<?php
foreach ($tahun as $t) {
echo '<option value="'.$t->Tahun.'">'.$t->Tahun.'</option>';
}
?>
</select>
<br />
<input class="btn btn-success" type="submit" value="Submit" />
</div>
</form>
<div class="chart">
<canvas id="testChart" style="height:250px"></canvas>
<div class="col-sm-3">
<div id="testLegend" style="font-size:25px"></div>
</div>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6" id="analisis">
<!-- LINE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Jumlah Transaksi Per-hari</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<form action="<?php echo $base_url;?>index.php/transaksi#analisis" method="post">
<div class="form-group">
<label>Tahun</label>
<select class="form-control" name="sTahunHari">
<?php
foreach ($tahun as $t) {
echo '<option value="'.$t->Tahun.'">'.$t->Tahun.'</option>';
}
?>
</select>
<br />
<select class="form-control" name="sBulanHari">
<option value="0">- Semua -</option>
<option value="1">Januari</option>
<option value="2">February</option>
<option value="3">Maret</option>
<option value="4">April</option>
<option value="5">Mei</option>
<option value="6">Juni</option>
<option value="7">Juli</option>
<option value="8">Agustus</option>
<option value="9">September</option>
<option value="10">Okotober</option>
<option value="11">November</option>
<option value="12">Desember</option>
</select>
<br />
<input class="btn btn-success" type="submit" value="Submit" />
</div>
</form>
<div class="chart">
<canvas id="hariChart" style="height:250px"></canvas>
<div class="col-sm-6">
<div id="hariLegend" style="font-size:25px"></div>
</div>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6" id="analisis">
<!-- LINE CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Analisis Jumlah Transaksi Per-kota</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="chart">
<canvas id="kotaChart" style="height:250px"></canvas>
<div class="col-sm-3">
<div id="kotaLegend" style="font-size:25px"></div>
</div>
</div>
</div>
</div>
<!-- /.box -->
</div>
<div class="col-md-6">
<!-- BAR CHART -->
<div class="box box-info">
<div class="box-header with-border">
<h3 class="box-title">Chart Analisis Rating Pelanggan</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
</button>
</div>
</div>
<div class="box-body">
<div class="col-md-3">
<div id="transaksiKategoriLegend"></div>
</div>
<div class="chart">
<canvas id="transaksiKategoriChart" style="height:250px"></canvas>
</div>
</div>
</div>
<!-- /.box -->
</div>
</div>
<!-- /.box -->
<script type="text/javascript" src="<?php echo $base_url;?>assets/jquery.js"></script>
<script type="text/javascript" src="<?php echo $base_url;?>assets/Chart.js"></script>
<script type="text/javascript">
$(document).ready(function(){
// =============================================================
          // line chart: number of transactions per month for the selected year
var data = {
labels: <?php echo $labelPerBulan; ?>,
datasets: [
{
label: <?php echo '"'.$selectTahun.'"' ?>,
fillColor: "rgb(102, 153, 255)",
strokeColor: "rgb(102, 153, 255)",
pointColor: "rgb(102, 153, 255)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(220,220,220,1)",
data: <?php echo $transaksiPerBulan; ?>
}
]
};
var ctx = document.getElementById("testChart").getContext("2d");
var chart = new Chart(ctx).Line(data);
document.getElementById("testLegend").innerHTML = chart.generateLegend();
// =============================================================
          // bar chart: number of transactions per day for the selected month and year
var data2 = {
labels: <?php echo $hari; ?>,
datasets: [
{
label: <?php echo '"'.$selectTahun2.' Bulan Ke-'.$selectBulanHari.'"' ?>,
fillColor: "rgb(194, 194, 163)",
strokeColor: "rgb(194, 194, 163)",
pointColor: "rgb(194, 194, 163)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(220,220,220,1)",
data: <?php echo $hariJumlah; ?>
}
]
};
var ctx2 = document.getElementById("hariChart").getContext("2d");
var chart2 = new Chart(ctx2).Bar(data2);
document.getElementById("hariLegend").innerHTML = chart2.generateLegend();
// ==============================================================
          // pie chart: number of transactions per city
          var data3 = [ <?php
            // build one slice per city and give it a random hex colour
            $entries = array();
            $rand = array('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f');
            foreach($kotaTransaksi as $c){
              $color = '#'.$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)].$rand[rand(0,15)];
              $entries[] = '{
                value: '.$c->Jumlah.',
                color: "'.$color.'",
                highlight: "'.$color.'",
                label: "'.$c->Nama.'"
              }';
            }
            // join with commas so no trailing placeholder entry is needed
            echo implode(',', $entries);
          ?>
          ]
var ctx3 = document.getElementById("kotaChart").getContext("2d");
var chart3 = new Chart(ctx3).Pie(data3, {
//String - A legend template
legendTemplate : "<ul class=\"<%=name.toLowerCase()%>-legend\"><% for (var i=0; i<segments.length; i++){%><span style=\"background-color:<%=segments[i].fillColor%>\"><%if(segments[i].label){%><%=segments[i].label%><%}%></span><%}%></ul>"
});
// document.getElementById("pieLegendKategori").innerHTML = chart3.generateLegend();
// ====================================================
          // bar chart: number of transactions per merchant category
var data4 = {
labels: <?php echo $transaksiKategori ?>,
datasets: [
{
fillColor: "rgb(102, 255, 204)",
strokeColor: "#3b8bba",
pointColor: "Blue",
pointStrokeColor: "#FFF",
pointHighlightFill: "#FFF",
pointHighlightStroke: "rgba(220,220,220,1)",
data: <?php echo $transaksiKategoriJumlah; ?> }
]
};
var ctx4 = document.getElementById("transaksiKategoriChart").getContext("2d");
var chart4 = new Chart(ctx4).Bar(data4);
});
</script>
</section>
</div>